]> git.ipfire.org Git - people/teissler/ipfire-2.x.git/blame - src/patches/grsecurity-2.9.1-3.10.10-201308292131.patch
kernel: update to 3.10.10.
[people/teissler/ipfire-2.x.git] / src / patches / grsecurity-2.9.1-3.10.10-201308292131.patch
CommitLineData
bb5f0bf8
AF
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..79768fb 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -92,19 +101,24 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89@@ -115,9 +129,11 @@ devlist.h*
90 dnotify_test
91 docproc
92 dslm
93+dtc-lexer.lex.c
94 elf2ecoff
95 elfconfig.h*
96 evergreen_reg_safe.h
97+exception_policy.conf
98 fixdep
99 flask.h
100 fore200e_mkfirm
101@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
102 gconf
103 gconf.glade.h
104 gen-devlist
105+gen-kdb_cmds.c
106 gen_crc32table
107 gen_init_cpio
108 generated
109 genheaders
110 genksyms
111 *_gray256.c
112+hash
113+hid-example
114 hpet_example
115 hugepage-mmap
116 hugepage-shm
117@@ -145,14 +164,14 @@ int32.c
118 int4.c
119 int8.c
120 kallsyms
121-kconfig
122+kern_constants.h
123 keywords.c
124 ksym.c*
125 ksym.h*
126 kxgettext
127 lex.c
128 lex.*.c
129-linux
130+lib1funcs.S
131 logo_*.c
132 logo_*_clut224.c
133 logo_*_mono.c
134@@ -162,14 +181,15 @@ mach-types.h
135 machtypes.h
136 map
137 map_hugetlb
138-media
139 mconf
140+mdp
141 miboot*
142 mk_elfconfig
143 mkboot
144 mkbugboot
145 mkcpustr
146 mkdep
147+mkpiggy
148 mkprep
149 mkregtable
150 mktables
151@@ -185,6 +205,8 @@ oui.c*
152 page-types
153 parse.c
154 parse.h
155+parse-events*
156+pasyms.h
157 patches*
158 pca200e.bin
159 pca200e_ecd.bin2
160@@ -194,6 +216,7 @@ perf-archive
161 piggyback
162 piggy.gzip
163 piggy.S
164+pmu-*
165 pnmtologo
166 ppc_defs.h*
167 pss_boot.h
168@@ -203,7 +226,10 @@ r200_reg_safe.h
169 r300_reg_safe.h
170 r420_reg_safe.h
171 r600_reg_safe.h
172+realmode.lds
173+realmode.relocs
174 recordmcount
175+regdb.c
176 relocs
177 rlim_names.h
178 rn50_reg_safe.h
179@@ -213,8 +239,12 @@ series
180 setup
181 setup.bin
182 setup.elf
183+signing_key*
184+size_overflow_hash.h
185 sImage
186+slabinfo
187 sm_tbl*
188+sortextable
189 split-include
190 syscalltab.h
191 tables.c
192@@ -224,6 +254,7 @@ tftpboot.img
193 timeconst.h
194 times.h*
195 trix_boot.h
196+user_constants.h
197 utsrelease.h*
198 vdso-syms.lds
199 vdso.lds
200@@ -235,13 +266,17 @@ vdso32.lds
201 vdso32.so.dbg
202 vdso64.lds
203 vdso64.so.dbg
204+vdsox32.lds
205+vdsox32-syms.lds
206 version.h*
207 vmImage
208 vmlinux
209 vmlinux-*
210 vmlinux.aout
211 vmlinux.bin.all
212+vmlinux.bin.bz2
213 vmlinux.lds
214+vmlinux.relocs
215 vmlinuz
216 voffset.h
217 vsyscall.lds
218@@ -249,9 +284,12 @@ vsyscall_32.lds
219 wanxlfw.inc
220 uImage
221 unifdef
222+utsrelease.h
223 wakeup.bin
224 wakeup.elf
225 wakeup.lds
226+x509*
227 zImage*
228 zconf.hash.c
229+zconf.lex.c
230 zoffset.h
231diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
232index 2fe6e76..889ee23 100644
233--- a/Documentation/kernel-parameters.txt
234+++ b/Documentation/kernel-parameters.txt
235@@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
236 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
237 Default: 1024
238
239+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
240+ ignore grsecurity's /proc restrictions
241+
242+
243 hashdist= [KNL,NUMA] Large hashes allocated during boot
244 are distributed across NUMA nodes. Defaults on
245 for 64-bit NUMA, off otherwise.
246@@ -1928,6 +1932,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
247 noexec=on: enable non-executable mappings (default)
248 noexec=off: disable non-executable mappings
249
250+ nopcid [X86-64]
251+ Disable PCID (Process-Context IDentifier) even if it
252+ is supported by the processor.
253+
254 nosmap [X86]
255 Disable SMAP (Supervisor Mode Access Prevention)
256 even if it is supported by processor.
257@@ -2195,6 +2203,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
258 the specified number of seconds. This is to be used if
259 your oopses keep scrolling off the screen.
260
261+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
262+ virtualization environments that don't cope well with the
263+ expand down segment used by UDEREF on X86-32 or the frequent
264+ page table updates on X86-64.
265+
266+ pax_sanitize_slab=
267+ 0/1 to disable/enable slab object sanitization (enabled by
268+ default).
269+
270+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
271+
272+ pax_extra_latent_entropy
273+ Enable a very simple form of latent entropy extraction
274+ from the first 4GB of memory as the bootmem allocator
275+ passes the memory pages to the buddy allocator.
276+
277+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
278+ when the processor supports PCID.
279+
280 pcbit= [HW,ISDN]
281
282 pcd. [PARIDE]
283diff --git a/Makefile b/Makefile
e2b79cd1 284index b119684..13ac256 100644
bb5f0bf8
AF
285--- a/Makefile
286+++ b/Makefile
287@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
288
289 HOSTCC = gcc
290 HOSTCXX = g++
291-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
292-HOSTCXXFLAGS = -O2
293+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
294+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
295+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
296
297 # Decide whether to build built-in, modular, or both.
298 # Normally, just do built-in.
299@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
300 # Rules shared between *config targets and build targets
301
302 # Basic helpers built in scripts/
303-PHONY += scripts_basic
304-scripts_basic:
305+PHONY += scripts_basic gcc-plugins
306+scripts_basic: gcc-plugins
307 $(Q)$(MAKE) $(build)=scripts/basic
308 $(Q)rm -f .tmp_quiet_recordmcount
309
310@@ -576,6 +577,65 @@ else
311 KBUILD_CFLAGS += -O2
312 endif
313
314+ifndef DISABLE_PAX_PLUGINS
315+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
316+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
317+else
318+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
319+endif
320+ifneq ($(PLUGINCC),)
321+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
322+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
323+endif
324+ifdef CONFIG_PAX_MEMORY_STACKLEAK
325+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
326+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
327+endif
328+ifdef CONFIG_KALLOCSTAT_PLUGIN
329+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
330+endif
331+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
332+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
333+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
334+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
335+endif
336+ifdef CONFIG_CHECKER_PLUGIN
337+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
338+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
339+endif
340+endif
341+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
342+ifdef CONFIG_PAX_SIZE_OVERFLOW
343+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
344+endif
345+ifdef CONFIG_PAX_LATENT_ENTROPY
346+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
347+endif
348+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
349+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
350+endif
351+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
352+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
353+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
354+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
355+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
356+ifeq ($(KBUILD_EXTMOD),)
357+gcc-plugins:
358+ $(Q)$(MAKE) $(build)=tools/gcc
359+else
360+gcc-plugins: ;
361+endif
362+else
363+gcc-plugins:
364+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
365+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
366+else
367+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
368+endif
369+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
370+endif
371+endif
372+
373 include $(srctree)/arch/$(SRCARCH)/Makefile
374
375 ifdef CONFIG_READABLE_ASM
376@@ -733,7 +793,7 @@ export mod_sign_cmd
377
378
379 ifeq ($(KBUILD_EXTMOD),)
380-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
381+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
382
383 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
384 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
385@@ -782,6 +842,8 @@ endif
386
387 # The actual objects are generated when descending,
388 # make sure no implicit rule kicks in
389+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
390+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
391 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
392
393 # Handle descending into subdirectories listed in $(vmlinux-dirs)
394@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
395 # Error messages still appears in the original language
396
397 PHONY += $(vmlinux-dirs)
398-$(vmlinux-dirs): prepare scripts
399+$(vmlinux-dirs): gcc-plugins prepare scripts
400 $(Q)$(MAKE) $(build)=$@
401
402 # Store (new) KERNELRELASE string in include/config/kernel.release
403@@ -835,6 +897,7 @@ prepare0: archprepare FORCE
404 $(Q)$(MAKE) $(build)=.
405
406 # All the preparing..
407+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
408 prepare: prepare0
409
410 # Generate some files
411@@ -942,6 +1005,8 @@ all: modules
412 # using awk while concatenating to the final file.
413
414 PHONY += modules
415+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
416+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
417 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
418 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
419 @$(kecho) ' Building modules, stage 2.';
420@@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
421
422 # Target to prepare building external modules
423 PHONY += modules_prepare
424-modules_prepare: prepare scripts
425+modules_prepare: gcc-plugins prepare scripts
426
427 # Target to install modules
428 PHONY += modules_install
429@@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
430 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
431 signing_key.priv signing_key.x509 x509.genkey \
432 extra_certificates signing_key.x509.keyid \
433- signing_key.x509.signer
434+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
435
436 # clean - Delete most, but leave enough to build external modules
437 #
438@@ -1063,6 +1128,7 @@ distclean: mrproper
439 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
440 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
441 -o -name '.*.rej' \
442+ -o -name '.*.rej' -o -name '*.so' \
443 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
444 -type f -print | xargs rm -f
445
446@@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
447 $(module-dirs): crmodverdir $(objtree)/Module.symvers
448 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
449
450+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
451+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
452 modules: $(module-dirs)
453 @$(kecho) ' Building modules, stage 2.';
454 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
455@@ -1359,17 +1427,21 @@ else
456 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
457 endif
458
459-%.s: %.c prepare scripts FORCE
460+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
461+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
462+%.s: %.c gcc-plugins prepare scripts FORCE
463 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
464 %.i: %.c prepare scripts FORCE
465 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
466-%.o: %.c prepare scripts FORCE
467+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
468+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
469+%.o: %.c gcc-plugins prepare scripts FORCE
470 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
471 %.lst: %.c prepare scripts FORCE
472 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
473-%.s: %.S prepare scripts FORCE
474+%.s: %.S gcc-plugins prepare scripts FORCE
475 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
476-%.o: %.S prepare scripts FORCE
477+%.o: %.S gcc-plugins prepare scripts FORCE
478 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
479 %.symtypes: %.c prepare scripts FORCE
480 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
481@@ -1379,11 +1451,15 @@ endif
482 $(cmd_crmodverdir)
483 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
484 $(build)=$(build-dir)
485-%/: prepare scripts FORCE
486+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
487+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
488+%/: gcc-plugins prepare scripts FORCE
489 $(cmd_crmodverdir)
490 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
491 $(build)=$(build-dir)
492-%.ko: prepare scripts FORCE
493+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495+%.ko: gcc-plugins prepare scripts FORCE
496 $(cmd_crmodverdir)
497 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
498 $(build)=$(build-dir) $(@:.ko=.o)
499diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
500index c2cbe4f..f7264b4 100644
501--- a/arch/alpha/include/asm/atomic.h
502+++ b/arch/alpha/include/asm/atomic.h
503@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
504 #define atomic_dec(v) atomic_sub(1,(v))
505 #define atomic64_dec(v) atomic64_sub(1,(v))
506
507+#define atomic64_read_unchecked(v) atomic64_read(v)
508+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
509+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
510+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
511+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
512+#define atomic64_inc_unchecked(v) atomic64_inc(v)
513+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
514+#define atomic64_dec_unchecked(v) atomic64_dec(v)
515+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
516+
517 #define smp_mb__before_atomic_dec() smp_mb()
518 #define smp_mb__after_atomic_dec() smp_mb()
519 #define smp_mb__before_atomic_inc() smp_mb()
520diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
521index ad368a9..fbe0f25 100644
522--- a/arch/alpha/include/asm/cache.h
523+++ b/arch/alpha/include/asm/cache.h
524@@ -4,19 +4,19 @@
525 #ifndef __ARCH_ALPHA_CACHE_H
526 #define __ARCH_ALPHA_CACHE_H
527
528+#include <linux/const.h>
529
530 /* Bytes per L1 (data) cache line. */
531 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
532-# define L1_CACHE_BYTES 64
533 # define L1_CACHE_SHIFT 6
534 #else
535 /* Both EV4 and EV5 are write-through, read-allocate,
536 direct-mapped, physical.
537 */
538-# define L1_CACHE_BYTES 32
539 # define L1_CACHE_SHIFT 5
540 #endif
541
542+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
543 #define SMP_CACHE_BYTES L1_CACHE_BYTES
544
545 #endif
546diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
547index 968d999..d36b2df 100644
548--- a/arch/alpha/include/asm/elf.h
549+++ b/arch/alpha/include/asm/elf.h
550@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
551
552 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
553
554+#ifdef CONFIG_PAX_ASLR
555+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
556+
557+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
558+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
559+#endif
560+
561 /* $0 is set by ld.so to a pointer to a function which might be
562 registered using atexit. This provides a mean for the dynamic
563 linker to call DT_FINI functions for shared libraries that have
564diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
565index bc2a0da..8ad11ee 100644
566--- a/arch/alpha/include/asm/pgalloc.h
567+++ b/arch/alpha/include/asm/pgalloc.h
568@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
569 pgd_set(pgd, pmd);
570 }
571
572+static inline void
573+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
574+{
575+ pgd_populate(mm, pgd, pmd);
576+}
577+
578 extern pgd_t *pgd_alloc(struct mm_struct *mm);
579
580 static inline void
581diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
582index 81a4342..348b927 100644
583--- a/arch/alpha/include/asm/pgtable.h
584+++ b/arch/alpha/include/asm/pgtable.h
585@@ -102,6 +102,17 @@ struct vm_area_struct;
586 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
587 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
588 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
589+
590+#ifdef CONFIG_PAX_PAGEEXEC
591+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
592+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
593+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
594+#else
595+# define PAGE_SHARED_NOEXEC PAGE_SHARED
596+# define PAGE_COPY_NOEXEC PAGE_COPY
597+# define PAGE_READONLY_NOEXEC PAGE_READONLY
598+#endif
599+
600 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
601
602 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
603diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
604index 2fd00b7..cfd5069 100644
605--- a/arch/alpha/kernel/module.c
606+++ b/arch/alpha/kernel/module.c
607@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
608
609 /* The small sections were sorted to the end of the segment.
610 The following should definitely cover them. */
611- gp = (u64)me->module_core + me->core_size - 0x8000;
612+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
613 got = sechdrs[me->arch.gotsecindex].sh_addr;
614
615 for (i = 0; i < n; i++) {
616diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
617index b9e37ad..44c24e7 100644
618--- a/arch/alpha/kernel/osf_sys.c
619+++ b/arch/alpha/kernel/osf_sys.c
620@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
621 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
622
623 static unsigned long
624-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
625- unsigned long limit)
626+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
627+ unsigned long limit, unsigned long flags)
628 {
629 struct vm_unmapped_area_info info;
630+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
631
632 info.flags = 0;
633 info.length = len;
634@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
635 info.high_limit = limit;
636 info.align_mask = 0;
637 info.align_offset = 0;
638+ info.threadstack_offset = offset;
639 return vm_unmapped_area(&info);
640 }
641
642@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
643 merely specific addresses, but regions of memory -- perhaps
644 this feature should be incorporated into all ports? */
645
646+#ifdef CONFIG_PAX_RANDMMAP
647+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
648+#endif
649+
650 if (addr) {
651- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
652+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
653 if (addr != (unsigned long) -ENOMEM)
654 return addr;
655 }
656
657 /* Next, try allocating at TASK_UNMAPPED_BASE. */
658- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
659- len, limit);
660+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
661+
662 if (addr != (unsigned long) -ENOMEM)
663 return addr;
664
665 /* Finally, try allocating in low memory. */
666- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
667+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
668
669 return addr;
670 }
671diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
672index 0c4132d..88f0d53 100644
673--- a/arch/alpha/mm/fault.c
674+++ b/arch/alpha/mm/fault.c
675@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
676 __reload_thread(pcb);
677 }
678
679+#ifdef CONFIG_PAX_PAGEEXEC
680+/*
681+ * PaX: decide what to do with offenders (regs->pc = fault address)
682+ *
683+ * returns 1 when task should be killed
684+ * 2 when patched PLT trampoline was detected
685+ * 3 when unpatched PLT trampoline was detected
686+ */
687+static int pax_handle_fetch_fault(struct pt_regs *regs)
688+{
689+
690+#ifdef CONFIG_PAX_EMUPLT
691+ int err;
692+
693+ do { /* PaX: patched PLT emulation #1 */
694+ unsigned int ldah, ldq, jmp;
695+
696+ err = get_user(ldah, (unsigned int *)regs->pc);
697+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
698+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
699+
700+ if (err)
701+ break;
702+
703+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
704+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
705+ jmp == 0x6BFB0000U)
706+ {
707+ unsigned long r27, addr;
708+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
709+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
710+
711+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
712+ err = get_user(r27, (unsigned long *)addr);
713+ if (err)
714+ break;
715+
716+ regs->r27 = r27;
717+ regs->pc = r27;
718+ return 2;
719+ }
720+ } while (0);
721+
722+ do { /* PaX: patched PLT emulation #2 */
723+ unsigned int ldah, lda, br;
724+
725+ err = get_user(ldah, (unsigned int *)regs->pc);
726+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
727+ err |= get_user(br, (unsigned int *)(regs->pc+8));
728+
729+ if (err)
730+ break;
731+
732+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
733+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
734+ (br & 0xFFE00000U) == 0xC3E00000U)
735+ {
736+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
737+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
738+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
739+
740+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
741+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
742+ return 2;
743+ }
744+ } while (0);
745+
746+ do { /* PaX: unpatched PLT emulation */
747+ unsigned int br;
748+
749+ err = get_user(br, (unsigned int *)regs->pc);
750+
751+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
752+ unsigned int br2, ldq, nop, jmp;
753+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
754+
755+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
756+ err = get_user(br2, (unsigned int *)addr);
757+ err |= get_user(ldq, (unsigned int *)(addr+4));
758+ err |= get_user(nop, (unsigned int *)(addr+8));
759+ err |= get_user(jmp, (unsigned int *)(addr+12));
760+ err |= get_user(resolver, (unsigned long *)(addr+16));
761+
762+ if (err)
763+ break;
764+
765+ if (br2 == 0xC3600000U &&
766+ ldq == 0xA77B000CU &&
767+ nop == 0x47FF041FU &&
768+ jmp == 0x6B7B0000U)
769+ {
770+ regs->r28 = regs->pc+4;
771+ regs->r27 = addr+16;
772+ regs->pc = resolver;
773+ return 3;
774+ }
775+ }
776+ } while (0);
777+#endif
778+
779+ return 1;
780+}
781+
782+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
783+{
784+ unsigned long i;
785+
786+ printk(KERN_ERR "PAX: bytes at PC: ");
787+ for (i = 0; i < 5; i++) {
788+ unsigned int c;
789+ if (get_user(c, (unsigned int *)pc+i))
790+ printk(KERN_CONT "???????? ");
791+ else
792+ printk(KERN_CONT "%08x ", c);
793+ }
794+ printk("\n");
795+}
796+#endif
797
798 /*
799 * This routine handles page faults. It determines the address,
800@@ -133,8 +251,29 @@ retry:
801 good_area:
802 si_code = SEGV_ACCERR;
803 if (cause < 0) {
804- if (!(vma->vm_flags & VM_EXEC))
805+ if (!(vma->vm_flags & VM_EXEC)) {
806+
807+#ifdef CONFIG_PAX_PAGEEXEC
808+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
809+ goto bad_area;
810+
811+ up_read(&mm->mmap_sem);
812+ switch (pax_handle_fetch_fault(regs)) {
813+
814+#ifdef CONFIG_PAX_EMUPLT
815+ case 2:
816+ case 3:
817+ return;
818+#endif
819+
820+ }
821+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
822+ do_group_exit(SIGKILL);
823+#else
824 goto bad_area;
825+#endif
826+
827+ }
828 } else if (!cause) {
829 /* Allow reads even for write-only mappings */
830 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
831diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
832index 18a9f5e..ca910b7 100644
833--- a/arch/arm/Kconfig
834+++ b/arch/arm/Kconfig
835@@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP
836
837 config UACCESS_WITH_MEMCPY
838 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
839- depends on MMU
840+ depends on MMU && !PAX_MEMORY_UDEREF
841 default y if CPU_FEROCEON
842 help
843 Implement faster copy_to_user and clear_user methods for CPU
844diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
845index da1c77d..2ee6056 100644
846--- a/arch/arm/include/asm/atomic.h
847+++ b/arch/arm/include/asm/atomic.h
848@@ -17,17 +17,35 @@
849 #include <asm/barrier.h>
850 #include <asm/cmpxchg.h>
851
852+#ifdef CONFIG_GENERIC_ATOMIC64
853+#include <asm-generic/atomic64.h>
854+#endif
855+
856 #define ATOMIC_INIT(i) { (i) }
857
858 #ifdef __KERNEL__
859
860+#define _ASM_EXTABLE(from, to) \
861+" .pushsection __ex_table,\"a\"\n"\
862+" .align 3\n" \
863+" .long " #from ", " #to"\n" \
864+" .popsection"
865+
866 /*
867 * On ARM, ordinary assignment (str instruction) doesn't clear the local
868 * strex/ldrex monitor on some implementations. The reason we can use it for
869 * atomic_set() is the clrex or dummy strex done on every exception return.
870 */
871 #define atomic_read(v) (*(volatile int *)&(v)->counter)
872+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
873+{
874+ return v->counter;
875+}
876 #define atomic_set(v,i) (((v)->counter) = (i))
877+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
878+{
879+ v->counter = i;
880+}
881
882 #if __LINUX_ARM_ARCH__ >= 6
883
884@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
885 int result;
886
887 __asm__ __volatile__("@ atomic_add\n"
888+"1: ldrex %1, [%3]\n"
889+" adds %0, %1, %4\n"
890+
891+#ifdef CONFIG_PAX_REFCOUNT
892+" bvc 3f\n"
893+"2: bkpt 0xf103\n"
894+"3:\n"
895+#endif
896+
897+" strex %1, %0, [%3]\n"
898+" teq %1, #0\n"
899+" bne 1b"
900+
901+#ifdef CONFIG_PAX_REFCOUNT
902+"\n4:\n"
903+ _ASM_EXTABLE(2b, 4b)
904+#endif
905+
906+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
907+ : "r" (&v->counter), "Ir" (i)
908+ : "cc");
909+}
910+
911+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
912+{
913+ unsigned long tmp;
914+ int result;
915+
916+ __asm__ __volatile__("@ atomic_add_unchecked\n"
917 "1: ldrex %0, [%3]\n"
918 " add %0, %0, %4\n"
919 " strex %1, %0, [%3]\n"
920@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
921 smp_mb();
922
923 __asm__ __volatile__("@ atomic_add_return\n"
924+"1: ldrex %1, [%3]\n"
925+" adds %0, %1, %4\n"
926+
927+#ifdef CONFIG_PAX_REFCOUNT
928+" bvc 3f\n"
929+" mov %0, %1\n"
930+"2: bkpt 0xf103\n"
931+"3:\n"
932+#endif
933+
934+" strex %1, %0, [%3]\n"
935+" teq %1, #0\n"
936+" bne 1b"
937+
938+#ifdef CONFIG_PAX_REFCOUNT
939+"\n4:\n"
940+ _ASM_EXTABLE(2b, 4b)
941+#endif
942+
943+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
944+ : "r" (&v->counter), "Ir" (i)
945+ : "cc");
946+
947+ smp_mb();
948+
949+ return result;
950+}
951+
952+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
953+{
954+ unsigned long tmp;
955+ int result;
956+
957+ smp_mb();
958+
959+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
960 "1: ldrex %0, [%3]\n"
961 " add %0, %0, %4\n"
962 " strex %1, %0, [%3]\n"
963@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
964 int result;
965
966 __asm__ __volatile__("@ atomic_sub\n"
967+"1: ldrex %1, [%3]\n"
968+" subs %0, %1, %4\n"
969+
970+#ifdef CONFIG_PAX_REFCOUNT
971+" bvc 3f\n"
972+"2: bkpt 0xf103\n"
973+"3:\n"
974+#endif
975+
976+" strex %1, %0, [%3]\n"
977+" teq %1, #0\n"
978+" bne 1b"
979+
980+#ifdef CONFIG_PAX_REFCOUNT
981+"\n4:\n"
982+ _ASM_EXTABLE(2b, 4b)
983+#endif
984+
985+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
986+ : "r" (&v->counter), "Ir" (i)
987+ : "cc");
988+}
989+
990+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
991+{
992+ unsigned long tmp;
993+ int result;
994+
995+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
996 "1: ldrex %0, [%3]\n"
997 " sub %0, %0, %4\n"
998 " strex %1, %0, [%3]\n"
999@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1000 smp_mb();
1001
1002 __asm__ __volatile__("@ atomic_sub_return\n"
1003-"1: ldrex %0, [%3]\n"
1004-" sub %0, %0, %4\n"
1005+"1: ldrex %1, [%3]\n"
1006+" subs %0, %1, %4\n"
1007+
1008+#ifdef CONFIG_PAX_REFCOUNT
1009+" bvc 3f\n"
1010+" mov %0, %1\n"
1011+"2: bkpt 0xf103\n"
1012+"3:\n"
1013+#endif
1014+
1015 " strex %1, %0, [%3]\n"
1016 " teq %1, #0\n"
1017 " bne 1b"
1018+
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+"\n4:\n"
1021+ _ASM_EXTABLE(2b, 4b)
1022+#endif
1023+
1024 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1025 : "r" (&v->counter), "Ir" (i)
1026 : "cc");
1027@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1028 return oldval;
1029 }
1030
1031+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1032+{
1033+ unsigned long oldval, res;
1034+
1035+ smp_mb();
1036+
1037+ do {
1038+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1039+ "ldrex %1, [%3]\n"
1040+ "mov %0, #0\n"
1041+ "teq %1, %4\n"
1042+ "strexeq %0, %5, [%3]\n"
1043+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1044+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1045+ : "cc");
1046+ } while (res);
1047+
1048+ smp_mb();
1049+
1050+ return oldval;
1051+}
1052+
1053 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1054 {
1055 unsigned long tmp, tmp2;
1056@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1057
1058 return val;
1059 }
1060+
1061+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1062+{
1063+ return atomic_add_return(i, v);
1064+}
1065+
1066 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1067+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1068+{
1069+ (void) atomic_add_return(i, v);
1070+}
1071
1072 static inline int atomic_sub_return(int i, atomic_t *v)
1073 {
1074@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1075 return val;
1076 }
1077 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1078+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1079+{
1080+ (void) atomic_sub_return(i, v);
1081+}
1082
1083 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1084 {
1085@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1086 return ret;
1087 }
1088
1089+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1090+{
1091+ return atomic_cmpxchg(v, old, new);
1092+}
1093+
1094 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1095 {
1096 unsigned long flags;
1097@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1098 #endif /* __LINUX_ARM_ARCH__ */
1099
1100 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1101+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1102+{
1103+ return xchg(&v->counter, new);
1104+}
1105
1106 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1107 {
1108@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1109 }
1110
1111 #define atomic_inc(v) atomic_add(1, v)
1112+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1113+{
1114+ atomic_add_unchecked(1, v);
1115+}
1116 #define atomic_dec(v) atomic_sub(1, v)
1117+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1118+{
1119+ atomic_sub_unchecked(1, v);
1120+}
1121
1122 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1123+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1124+{
1125+ return atomic_add_return_unchecked(1, v) == 0;
1126+}
1127 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1128 #define atomic_inc_return(v) (atomic_add_return(1, v))
1129+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1130+{
1131+ return atomic_add_return_unchecked(1, v);
1132+}
1133 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1134 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1135
1136@@ -241,6 +428,14 @@ typedef struct {
1137 u64 __aligned(8) counter;
1138 } atomic64_t;
1139
1140+#ifdef CONFIG_PAX_REFCOUNT
1141+typedef struct {
1142+ u64 __aligned(8) counter;
1143+} atomic64_unchecked_t;
1144+#else
1145+typedef atomic64_t atomic64_unchecked_t;
1146+#endif
1147+
1148 #define ATOMIC64_INIT(i) { (i) }
1149
1150 #ifdef CONFIG_ARM_LPAE
1151@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1152 return result;
1153 }
1154
1155+static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
1156+{
1157+ u64 result;
1158+
1159+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1160+" ldrd %0, %H0, [%1]"
1161+ : "=&r" (result)
1162+ : "r" (&v->counter), "Qo" (v->counter)
1163+ );
1164+
1165+ return result;
1166+}
1167+
1168 static inline void atomic64_set(atomic64_t *v, u64 i)
1169 {
1170 __asm__ __volatile__("@ atomic64_set\n"
1171@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1172 : "r" (&v->counter), "r" (i)
1173 );
1174 }
1175+
1176+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1177+{
1178+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1179+" strd %2, %H2, [%1]"
1180+ : "=Qo" (v->counter)
1181+ : "r" (&v->counter), "r" (i)
1182+ );
1183+}
1184 #else
1185 static inline u64 atomic64_read(const atomic64_t *v)
1186 {
1187@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1188 return result;
1189 }
1190
1191+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1192+{
1193+ u64 result;
1194+
1195+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1196+" ldrexd %0, %H0, [%1]"
1197+ : "=&r" (result)
1198+ : "r" (&v->counter), "Qo" (v->counter)
1199+ );
1200+
1201+ return result;
1202+}
1203+
1204 static inline void atomic64_set(atomic64_t *v, u64 i)
1205 {
1206 u64 tmp;
1207@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1208 : "r" (&v->counter), "r" (i)
1209 : "cc");
1210 }
1211+
1212+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1213+{
1214+ u64 tmp;
1215+
1216+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1217+"1: ldrexd %0, %H0, [%2]\n"
1218+" strexd %0, %3, %H3, [%2]\n"
1219+" teq %0, #0\n"
1220+" bne 1b"
1221+ : "=&r" (tmp), "=Qo" (v->counter)
1222+ : "r" (&v->counter), "r" (i)
1223+ : "cc");
1224+}
1225+
1226 #endif
1227
1228 static inline void atomic64_add(u64 i, atomic64_t *v)
1229@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1230 __asm__ __volatile__("@ atomic64_add\n"
1231 "1: ldrexd %0, %H0, [%3]\n"
1232 " adds %0, %0, %4\n"
1233+" adcs %H0, %H0, %H4\n"
1234+
1235+#ifdef CONFIG_PAX_REFCOUNT
1236+" bvc 3f\n"
1237+"2: bkpt 0xf103\n"
1238+"3:\n"
1239+#endif
1240+
1241+" strexd %1, %0, %H0, [%3]\n"
1242+" teq %1, #0\n"
1243+" bne 1b"
1244+
1245+#ifdef CONFIG_PAX_REFCOUNT
1246+"\n4:\n"
1247+ _ASM_EXTABLE(2b, 4b)
1248+#endif
1249+
1250+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1251+ : "r" (&v->counter), "r" (i)
1252+ : "cc");
1253+}
1254+
1255+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1256+{
1257+ u64 result;
1258+ unsigned long tmp;
1259+
1260+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1261+"1: ldrexd %0, %H0, [%3]\n"
1262+" adds %0, %0, %4\n"
1263 " adc %H0, %H0, %H4\n"
1264 " strexd %1, %0, %H0, [%3]\n"
1265 " teq %1, #0\n"
1266@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1267
1268 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1269 {
1270- u64 result;
1271- unsigned long tmp;
1272+ u64 result, tmp;
1273
1274 smp_mb();
1275
1276 __asm__ __volatile__("@ atomic64_add_return\n"
1277+"1: ldrexd %1, %H1, [%3]\n"
1278+" adds %0, %1, %4\n"
1279+" adcs %H0, %H1, %H4\n"
1280+
1281+#ifdef CONFIG_PAX_REFCOUNT
1282+" bvc 3f\n"
1283+" mov %0, %1\n"
1284+" mov %H0, %H1\n"
1285+"2: bkpt 0xf103\n"
1286+"3:\n"
1287+#endif
1288+
1289+" strexd %1, %0, %H0, [%3]\n"
1290+" teq %1, #0\n"
1291+" bne 1b"
1292+
1293+#ifdef CONFIG_PAX_REFCOUNT
1294+"\n4:\n"
1295+ _ASM_EXTABLE(2b, 4b)
1296+#endif
1297+
1298+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1299+ : "r" (&v->counter), "r" (i)
1300+ : "cc");
1301+
1302+ smp_mb();
1303+
1304+ return result;
1305+}
1306+
1307+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1308+{
1309+ u64 result;
1310+ unsigned long tmp;
1311+
1312+ smp_mb();
1313+
1314+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1315 "1: ldrexd %0, %H0, [%3]\n"
1316 " adds %0, %0, %4\n"
1317 " adc %H0, %H0, %H4\n"
1318@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1319 __asm__ __volatile__("@ atomic64_sub\n"
1320 "1: ldrexd %0, %H0, [%3]\n"
1321 " subs %0, %0, %4\n"
1322+" sbcs %H0, %H0, %H4\n"
1323+
1324+#ifdef CONFIG_PAX_REFCOUNT
1325+" bvc 3f\n"
1326+"2: bkpt 0xf103\n"
1327+"3:\n"
1328+#endif
1329+
1330+" strexd %1, %0, %H0, [%3]\n"
1331+" teq %1, #0\n"
1332+" bne 1b"
1333+
1334+#ifdef CONFIG_PAX_REFCOUNT
1335+"\n4:\n"
1336+ _ASM_EXTABLE(2b, 4b)
1337+#endif
1338+
1339+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1340+ : "r" (&v->counter), "r" (i)
1341+ : "cc");
1342+}
1343+
1344+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1345+{
1346+ u64 result;
1347+ unsigned long tmp;
1348+
1349+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1350+"1: ldrexd %0, %H0, [%3]\n"
1351+" subs %0, %0, %4\n"
1352 " sbc %H0, %H0, %H4\n"
1353 " strexd %1, %0, %H0, [%3]\n"
1354 " teq %1, #0\n"
1355@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1356
1357 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1358 {
1359- u64 result;
1360- unsigned long tmp;
1361+ u64 result, tmp;
1362
1363 smp_mb();
1364
1365 __asm__ __volatile__("@ atomic64_sub_return\n"
1366-"1: ldrexd %0, %H0, [%3]\n"
1367-" subs %0, %0, %4\n"
1368-" sbc %H0, %H0, %H4\n"
1369+"1: ldrexd %1, %H1, [%3]\n"
1370+" subs %0, %1, %4\n"
1371+" sbcs %H0, %H1, %H4\n"
1372+
1373+#ifdef CONFIG_PAX_REFCOUNT
1374+" bvc 3f\n"
1375+" mov %0, %1\n"
1376+" mov %H0, %H1\n"
1377+"2: bkpt 0xf103\n"
1378+"3:\n"
1379+#endif
1380+
1381 " strexd %1, %0, %H0, [%3]\n"
1382 " teq %1, #0\n"
1383 " bne 1b"
1384+
1385+#ifdef CONFIG_PAX_REFCOUNT
1386+"\n4:\n"
1387+ _ASM_EXTABLE(2b, 4b)
1388+#endif
1389+
1390 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1391 : "r" (&v->counter), "r" (i)
1392 : "cc");
1393@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1394 return oldval;
1395 }
1396
1397+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1398+{
1399+ u64 oldval;
1400+ unsigned long res;
1401+
1402+ smp_mb();
1403+
1404+ do {
1405+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1406+ "ldrexd %1, %H1, [%3]\n"
1407+ "mov %0, #0\n"
1408+ "teq %1, %4\n"
1409+ "teqeq %H1, %H4\n"
1410+ "strexdeq %0, %5, %H5, [%3]"
1411+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1412+ : "r" (&ptr->counter), "r" (old), "r" (new)
1413+ : "cc");
1414+ } while (res);
1415+
1416+ smp_mb();
1417+
1418+ return oldval;
1419+}
1420+
1421 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1422 {
1423 u64 result;
1424@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1425
1426 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1427 {
1428- u64 result;
1429- unsigned long tmp;
1430+ u64 result, tmp;
1431
1432 smp_mb();
1433
1434 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1435-"1: ldrexd %0, %H0, [%3]\n"
1436-" subs %0, %0, #1\n"
1437-" sbc %H0, %H0, #0\n"
1438+"1: ldrexd %1, %H1, [%3]\n"
1439+" subs %0, %1, #1\n"
1440+" sbcs %H0, %H1, #0\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+" bvc 3f\n"
1444+" mov %0, %1\n"
1445+" mov %H0, %H1\n"
1446+"2: bkpt 0xf103\n"
1447+"3:\n"
1448+#endif
1449+
1450 " teq %H0, #0\n"
1451-" bmi 2f\n"
1452+" bmi 4f\n"
1453 " strexd %1, %0, %H0, [%3]\n"
1454 " teq %1, #0\n"
1455 " bne 1b\n"
1456-"2:"
1457+"4:\n"
1458+
1459+#ifdef CONFIG_PAX_REFCOUNT
1460+ _ASM_EXTABLE(2b, 4b)
1461+#endif
1462+
1463 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1464 : "r" (&v->counter)
1465 : "cc");
1466@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1467 " teq %0, %5\n"
1468 " teqeq %H0, %H5\n"
1469 " moveq %1, #0\n"
1470-" beq 2f\n"
1471+" beq 4f\n"
1472 " adds %0, %0, %6\n"
1473-" adc %H0, %H0, %H6\n"
1474+" adcs %H0, %H0, %H6\n"
1475+
1476+#ifdef CONFIG_PAX_REFCOUNT
1477+" bvc 3f\n"
1478+"2: bkpt 0xf103\n"
1479+"3:\n"
1480+#endif
1481+
1482 " strexd %2, %0, %H0, [%4]\n"
1483 " teq %2, #0\n"
1484 " bne 1b\n"
1485-"2:"
1486+"4:\n"
1487+
1488+#ifdef CONFIG_PAX_REFCOUNT
1489+ _ASM_EXTABLE(2b, 4b)
1490+#endif
1491+
1492 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1493 : "r" (&v->counter), "r" (u), "r" (a)
1494 : "cc");
1495@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1496
1497 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1498 #define atomic64_inc(v) atomic64_add(1LL, (v))
1499+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1500 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1501+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1502 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1503 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1504 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1505+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1506 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1507 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1508 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1509diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1510index 75fe66b..ba3dee4 100644
1511--- a/arch/arm/include/asm/cache.h
1512+++ b/arch/arm/include/asm/cache.h
1513@@ -4,8 +4,10 @@
1514 #ifndef __ASMARM_CACHE_H
1515 #define __ASMARM_CACHE_H
1516
1517+#include <linux/const.h>
1518+
1519 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1520-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1521+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1522
1523 /*
1524 * Memory returned by kmalloc() may be used for DMA, so we must make
1525@@ -24,5 +26,6 @@
1526 #endif
1527
1528 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1529+#define __read_only __attribute__ ((__section__(".data..read_only")))
1530
1531 #endif
1532diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1533index 17d0ae8..014e350 100644
1534--- a/arch/arm/include/asm/cacheflush.h
1535+++ b/arch/arm/include/asm/cacheflush.h
1536@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1537 void (*dma_unmap_area)(const void *, size_t, int);
1538
1539 void (*dma_flush_range)(const void *, const void *);
1540-};
1541+} __no_const;
1542
1543 /*
1544 * Select the calling method
1545diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1546index 6dcc164..b14d917 100644
1547--- a/arch/arm/include/asm/checksum.h
1548+++ b/arch/arm/include/asm/checksum.h
1549@@ -37,7 +37,19 @@ __wsum
1550 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1551
1552 __wsum
1553-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1554+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1555+
1556+static inline __wsum
1557+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1558+{
1559+ __wsum ret;
1560+ pax_open_userland();
1561+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1562+ pax_close_userland();
1563+ return ret;
1564+}
1565+
1566+
1567
1568 /*
1569 * Fold a partial checksum without adding pseudo headers
1570diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1571index 4f009c1..466c59b 100644
1572--- a/arch/arm/include/asm/cmpxchg.h
1573+++ b/arch/arm/include/asm/cmpxchg.h
1574@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1575
1576 #define xchg(ptr,x) \
1577 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1578+#define xchg_unchecked(ptr,x) \
1579+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1580
1581 #include <asm-generic/cmpxchg-local.h>
1582
1583diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1584index 6ddbe44..b5e38b1 100644
1585--- a/arch/arm/include/asm/domain.h
1586+++ b/arch/arm/include/asm/domain.h
1587@@ -48,18 +48,37 @@
1588 * Domain types
1589 */
1590 #define DOMAIN_NOACCESS 0
1591-#define DOMAIN_CLIENT 1
1592 #ifdef CONFIG_CPU_USE_DOMAINS
1593+#define DOMAIN_USERCLIENT 1
1594+#define DOMAIN_KERNELCLIENT 1
1595 #define DOMAIN_MANAGER 3
1596+#define DOMAIN_VECTORS DOMAIN_USER
1597 #else
1598+
1599+#ifdef CONFIG_PAX_KERNEXEC
1600 #define DOMAIN_MANAGER 1
1601+#define DOMAIN_KERNEXEC 3
1602+#else
1603+#define DOMAIN_MANAGER 1
1604+#endif
1605+
1606+#ifdef CONFIG_PAX_MEMORY_UDEREF
1607+#define DOMAIN_USERCLIENT 0
1608+#define DOMAIN_UDEREF 1
1609+#define DOMAIN_VECTORS DOMAIN_KERNEL
1610+#else
1611+#define DOMAIN_USERCLIENT 1
1612+#define DOMAIN_VECTORS DOMAIN_USER
1613+#endif
1614+#define DOMAIN_KERNELCLIENT 1
1615+
1616 #endif
1617
1618 #define domain_val(dom,type) ((type) << (2*(dom)))
1619
1620 #ifndef __ASSEMBLY__
1621
1622-#ifdef CONFIG_CPU_USE_DOMAINS
1623+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1624 static inline void set_domain(unsigned val)
1625 {
1626 asm volatile(
1627@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1628 isb();
1629 }
1630
1631-#define modify_domain(dom,type) \
1632- do { \
1633- struct thread_info *thread = current_thread_info(); \
1634- unsigned int domain = thread->cpu_domain; \
1635- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1636- thread->cpu_domain = domain | domain_val(dom, type); \
1637- set_domain(thread->cpu_domain); \
1638- } while (0)
1639-
1640+extern void modify_domain(unsigned int dom, unsigned int type);
1641 #else
1642 static inline void set_domain(unsigned val) { }
1643 static inline void modify_domain(unsigned dom, unsigned type) { }
1644diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1645index 56211f2..17e8a25 100644
1646--- a/arch/arm/include/asm/elf.h
1647+++ b/arch/arm/include/asm/elf.h
1648@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1649 the loader. We need to make sure that it is out of the way of the program
1650 that it will "exec", and that there is sufficient room for the brk. */
1651
1652-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1653+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1654+
1655+#ifdef CONFIG_PAX_ASLR
1656+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1657+
1658+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1659+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1660+#endif
1661
1662 /* When the program starts, a1 contains a pointer to a function to be
1663 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1664@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1665 extern void elf_set_personality(const struct elf32_hdr *);
1666 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1667
1668-struct mm_struct;
1669-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1670-#define arch_randomize_brk arch_randomize_brk
1671-
1672 #ifdef CONFIG_MMU
1673 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1674 struct linux_binprm;
1675diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1676index de53547..52b9a28 100644
1677--- a/arch/arm/include/asm/fncpy.h
1678+++ b/arch/arm/include/asm/fncpy.h
1679@@ -81,7 +81,9 @@
1680 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1681 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1682 \
1683+ pax_open_kernel(); \
1684 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1685+ pax_close_kernel(); \
1686 flush_icache_range((unsigned long)(dest_buf), \
1687 (unsigned long)(dest_buf) + (size)); \
1688 \
1689diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1690index e42cf59..7b94b8f 100644
1691--- a/arch/arm/include/asm/futex.h
1692+++ b/arch/arm/include/asm/futex.h
1693@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1694 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1695 return -EFAULT;
1696
1697+ pax_open_userland();
1698+
1699 smp_mb();
1700 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1701 "1: ldrex %1, [%4]\n"
1702@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1703 : "cc", "memory");
1704 smp_mb();
1705
1706+ pax_close_userland();
1707+
1708 *uval = val;
1709 return ret;
1710 }
1711@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1712 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1713 return -EFAULT;
1714
1715+ pax_open_userland();
1716+
1717 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1718 "1: " TUSER(ldr) " %1, [%4]\n"
1719 " teq %1, %2\n"
1720@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1721 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1722 : "cc", "memory");
1723
1724+ pax_close_userland();
1725+
1726 *uval = val;
1727 return ret;
1728 }
1729@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1730 return -EFAULT;
1731
1732 pagefault_disable(); /* implies preempt_disable() */
1733+ pax_open_userland();
1734
1735 switch (op) {
1736 case FUTEX_OP_SET:
1737@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1738 ret = -ENOSYS;
1739 }
1740
1741+ pax_close_userland();
1742 pagefault_enable(); /* subsumes preempt_enable() */
1743
1744 if (!ret) {
1745diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1746index 83eb2f7..ed77159 100644
1747--- a/arch/arm/include/asm/kmap_types.h
1748+++ b/arch/arm/include/asm/kmap_types.h
1749@@ -4,6 +4,6 @@
1750 /*
1751 * This is the "bare minimum". AIO seems to require this.
1752 */
1753-#define KM_TYPE_NR 16
1754+#define KM_TYPE_NR 17
1755
1756 #endif
1757diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1758index 9e614a1..3302cca 100644
1759--- a/arch/arm/include/asm/mach/dma.h
1760+++ b/arch/arm/include/asm/mach/dma.h
1761@@ -22,7 +22,7 @@ struct dma_ops {
1762 int (*residue)(unsigned int, dma_t *); /* optional */
1763 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1764 const char *type;
1765-};
1766+} __do_const;
1767
1768 struct dma_struct {
1769 void *addr; /* single DMA address */
1770diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1771index 2fe141f..192dc01 100644
1772--- a/arch/arm/include/asm/mach/map.h
1773+++ b/arch/arm/include/asm/mach/map.h
1774@@ -27,13 +27,16 @@ struct map_desc {
1775 #define MT_MINICLEAN 6
1776 #define MT_LOW_VECTORS 7
1777 #define MT_HIGH_VECTORS 8
1778-#define MT_MEMORY 9
1779+#define MT_MEMORY_RWX 9
1780 #define MT_ROM 10
1781-#define MT_MEMORY_NONCACHED 11
1782+#define MT_MEMORY_NONCACHED_RX 11
1783 #define MT_MEMORY_DTCM 12
1784 #define MT_MEMORY_ITCM 13
1785 #define MT_MEMORY_SO 14
1786 #define MT_MEMORY_DMA_READY 15
1787+#define MT_MEMORY_RW 16
1788+#define MT_MEMORY_RX 17
1789+#define MT_MEMORY_NONCACHED_RW 18
1790
1791 #ifdef CONFIG_MMU
1792 extern void iotable_init(struct map_desc *, int);
1793diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1794index 12f71a1..04e063c 100644
1795--- a/arch/arm/include/asm/outercache.h
1796+++ b/arch/arm/include/asm/outercache.h
1797@@ -35,7 +35,7 @@ struct outer_cache_fns {
1798 #endif
1799 void (*set_debug)(unsigned long);
1800 void (*resume)(void);
1801-};
1802+} __no_const;
1803
1804 #ifdef CONFIG_OUTER_CACHE
1805
1806diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1807index cbdc7a2..32f44fe 100644
1808--- a/arch/arm/include/asm/page.h
1809+++ b/arch/arm/include/asm/page.h
1810@@ -114,7 +114,7 @@ struct cpu_user_fns {
1811 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1812 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1813 unsigned long vaddr, struct vm_area_struct *vma);
1814-};
1815+} __no_const;
1816
1817 #ifdef MULTI_USER
1818 extern struct cpu_user_fns cpu_user;
1819diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1820index 943504f..c37a730 100644
1821--- a/arch/arm/include/asm/pgalloc.h
1822+++ b/arch/arm/include/asm/pgalloc.h
1823@@ -17,6 +17,7 @@
1824 #include <asm/processor.h>
1825 #include <asm/cacheflush.h>
1826 #include <asm/tlbflush.h>
1827+#include <asm/system_info.h>
1828
1829 #define check_pgt_cache() do { } while (0)
1830
1831@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1832 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1833 }
1834
1835+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1836+{
1837+ pud_populate(mm, pud, pmd);
1838+}
1839+
1840 #else /* !CONFIG_ARM_LPAE */
1841
1842 /*
1843@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1844 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1845 #define pmd_free(mm, pmd) do { } while (0)
1846 #define pud_populate(mm,pmd,pte) BUG()
1847+#define pud_populate_kernel(mm,pmd,pte) BUG()
1848
1849 #endif /* CONFIG_ARM_LPAE */
1850
1851@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1852 __free_page(pte);
1853 }
1854
1855+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1856+{
1857+#ifdef CONFIG_ARM_LPAE
1858+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1859+#else
1860+ if (addr & SECTION_SIZE)
1861+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1862+ else
1863+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1864+#endif
1865+ flush_pmd_entry(pmdp);
1866+}
1867+
1868 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1869 pmdval_t prot)
1870 {
1871@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1872 static inline void
1873 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1874 {
1875- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1876+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1877 }
1878 #define pmd_pgtable(pmd) pmd_page(pmd)
1879
1880diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1881index 5cfba15..f415e1a 100644
1882--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1883+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1884@@ -20,12 +20,15 @@
1885 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1886 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1887 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1888+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1889 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1890 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1891 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1892+
1893 /*
1894 * - section
1895 */
1896+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1897 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1898 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1899 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1900@@ -37,6 +40,7 @@
1901 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1902 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1903 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1904+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1905
1906 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1907 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1908@@ -66,6 +70,7 @@
1909 * - extended small page/tiny page
1910 */
1911 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1912+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1913 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1914 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1915 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1916diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1917index f97ee02..cc9fe9e 100644
1918--- a/arch/arm/include/asm/pgtable-2level.h
1919+++ b/arch/arm/include/asm/pgtable-2level.h
1920@@ -126,6 +126,9 @@
1921 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1922 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1923
1924+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1925+#define L_PTE_PXN (_AT(pteval_t, 0))
1926+
1927 /*
1928 * These are the memory types, defined to be compatible with
1929 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1930diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1931index 18f5cef..25b8f43 100644
1932--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1933+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1934@@ -41,6 +41,7 @@
1935 */
1936 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1937 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1938+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1939 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1940 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1941 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1942@@ -71,6 +72,7 @@
1943 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1944 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1945 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1946+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1948
1949 /*
1950diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1951index 86b8fe3..e25f975 100644
1952--- a/arch/arm/include/asm/pgtable-3level.h
1953+++ b/arch/arm/include/asm/pgtable-3level.h
1954@@ -74,6 +74,7 @@
1955 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1956 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1957 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1958+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1959 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1960 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1961 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1962@@ -82,6 +83,7 @@
1963 /*
1964 * To be used in assembly code with the upper page attributes.
1965 */
1966+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1967 #define L_PTE_XN_HIGH (1 << (54 - 32))
1968 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1969
1970diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
e2b79cd1 1971index 9bcd262..1ff999b 100644
bb5f0bf8
AF
1972--- a/arch/arm/include/asm/pgtable.h
1973+++ b/arch/arm/include/asm/pgtable.h
1974@@ -30,6 +30,9 @@
1975 #include <asm/pgtable-2level.h>
1976 #endif
1977
1978+#define ktla_ktva(addr) (addr)
1979+#define ktva_ktla(addr) (addr)
1980+
1981 /*
1982 * Just any arbitrary offset to the start of the vmalloc VM area: the
1983 * current 8MB value just means that there will be a 8MB "hole" after the
1984@@ -45,6 +48,9 @@
1985 #define LIBRARY_TEXT_START 0x0c000000
1986
1987 #ifndef __ASSEMBLY__
1988+extern pteval_t __supported_pte_mask;
1989+extern pmdval_t __supported_pmd_mask;
1990+
1991 extern void __pte_error(const char *file, int line, pte_t);
1992 extern void __pmd_error(const char *file, int line, pmd_t);
1993 extern void __pgd_error(const char *file, int line, pgd_t);
e2b79cd1 1994@@ -53,6 +59,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
bb5f0bf8
AF
1995 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1996 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1997
1998+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1999+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2000+
e2b79cd1 2001+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
bb5f0bf8
AF
2002+#include <asm/domain.h>
2003+#include <linux/thread_info.h>
2004+#include <linux/preempt.h>
bb5f0bf8 2005+
bb5f0bf8
AF
2006+static inline int test_domain(int domain, int domaintype)
2007+{
2008+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2009+}
2010+#endif
2011+
2012+#ifdef CONFIG_PAX_KERNEXEC
2013+static inline unsigned long pax_open_kernel(void) {
2014+#ifdef CONFIG_ARM_LPAE
2015+ /* TODO */
2016+#else
2017+ preempt_disable();
2018+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2019+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2020+#endif
2021+ return 0;
2022+}
2023+
2024+static inline unsigned long pax_close_kernel(void) {
2025+#ifdef CONFIG_ARM_LPAE
2026+ /* TODO */
2027+#else
2028+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2029+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2030+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2031+ preempt_enable_no_resched();
2032+#endif
2033+ return 0;
2034+}
2035+#else
2036+static inline unsigned long pax_open_kernel(void) { return 0; }
2037+static inline unsigned long pax_close_kernel(void) { return 0; }
2038+#endif
2039+
2040 /*
2041 * This is the lowest virtual address we can permit any user space
2042 * mapping to be mapped at. This is particularly important for
e2b79cd1 2043@@ -72,8 +120,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
bb5f0bf8
AF
2044 /*
2045 * The pgprot_* and protection_map entries will be fixed up in runtime
2046 * to include the cachable and bufferable bits based on memory policy,
2047- * as well as any architecture dependent bits like global/ASID and SMP
2048- * shared mapping bits.
2049+ * as well as any architecture dependent bits like global/ASID, PXN,
2050+ * and SMP shared mapping bits.
2051 */
2052 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2053
e2b79cd1 2054@@ -257,7 +305,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
bb5f0bf8
AF
2055 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2056 {
2057 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2058- L_PTE_NONE | L_PTE_VALID;
2059+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2060 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2061 return pte;
2062 }
2063diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2064index f3628fb..a0672dd 100644
2065--- a/arch/arm/include/asm/proc-fns.h
2066+++ b/arch/arm/include/asm/proc-fns.h
2067@@ -75,7 +75,7 @@ extern struct processor {
2068 unsigned int suspend_size;
2069 void (*do_suspend)(void *);
2070 void (*do_resume)(void *);
2071-} processor;
2072+} __do_const processor;
2073
2074 #ifndef MULTI_CPU
2075 extern void cpu_proc_init(void);
2076diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2077index ce0dbe7..c085b6f 100644
2078--- a/arch/arm/include/asm/psci.h
2079+++ b/arch/arm/include/asm/psci.h
2080@@ -29,7 +29,7 @@ struct psci_operations {
2081 int (*cpu_off)(struct psci_power_state state);
2082 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2083 int (*migrate)(unsigned long cpuid);
2084-};
2085+} __no_const;
2086
2087 extern struct psci_operations psci_ops;
2088
2089diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2090index d3a22be..3a69ad5 100644
2091--- a/arch/arm/include/asm/smp.h
2092+++ b/arch/arm/include/asm/smp.h
2093@@ -107,7 +107,7 @@ struct smp_operations {
2094 int (*cpu_disable)(unsigned int cpu);
2095 #endif
2096 #endif
2097-};
2098+} __no_const;
2099
2100 /*
2101 * set platform specific SMP operations
2102diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2103index f00b569..aa5bb41 100644
2104--- a/arch/arm/include/asm/thread_info.h
2105+++ b/arch/arm/include/asm/thread_info.h
2106@@ -77,9 +77,9 @@ struct thread_info {
2107 .flags = 0, \
2108 .preempt_count = INIT_PREEMPT_COUNT, \
2109 .addr_limit = KERNEL_DS, \
2110- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2111- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2112- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2113+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2114+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2115+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2116 .restart_block = { \
2117 .fn = do_no_restart_syscall, \
2118 }, \
2119@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2120 #define TIF_SYSCALL_AUDIT 9
2121 #define TIF_SYSCALL_TRACEPOINT 10
2122 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2123-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2124+/* within 8 bits of TIF_SYSCALL_TRACE
2125+ * to meet flexible second operand requirements
2126+ */
2127+#define TIF_GRSEC_SETXID 12
2128+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2129 #define TIF_USING_IWMMXT 17
2130 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2131 #define TIF_RESTORE_SIGMASK 20
2132@@ -165,10 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2133 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2134 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2135 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2136+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2137
2138 /* Checks for any syscall work in entry-common.S */
2139 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2140- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2141+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2142
2143 /*
2144 * Change these and you break ASM code in entry-common.S
2145diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2146index 7e1f760..de33b13 100644
2147--- a/arch/arm/include/asm/uaccess.h
2148+++ b/arch/arm/include/asm/uaccess.h
2149@@ -18,6 +18,7 @@
2150 #include <asm/domain.h>
2151 #include <asm/unified.h>
2152 #include <asm/compiler.h>
2153+#include <asm/pgtable.h>
2154
2155 #define VERIFY_READ 0
2156 #define VERIFY_WRITE 1
2157@@ -63,11 +64,38 @@ extern int __put_user_bad(void);
2158 static inline void set_fs(mm_segment_t fs)
2159 {
2160 current_thread_info()->addr_limit = fs;
2161- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2162+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2163 }
2164
2165 #define segment_eq(a,b) ((a) == (b))
2166
2167+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2168+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2169+
2170+static inline void pax_open_userland(void)
2171+{
2172+
2173+#ifdef CONFIG_PAX_MEMORY_UDEREF
2174+ if (segment_eq(get_fs(), USER_DS)) {
2175+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2176+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2177+ }
2178+#endif
2179+
2180+}
2181+
2182+static inline void pax_close_userland(void)
2183+{
2184+
2185+#ifdef CONFIG_PAX_MEMORY_UDEREF
2186+ if (segment_eq(get_fs(), USER_DS)) {
2187+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2188+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2189+ }
2190+#endif
2191+
2192+}
2193+
2194 #define __addr_ok(addr) ({ \
2195 unsigned long flag; \
2196 __asm__("cmp %2, %0; movlo %0, #0" \
2197@@ -143,8 +171,12 @@ extern int __get_user_4(void *);
2198
2199 #define get_user(x,p) \
2200 ({ \
2201+ int __e; \
2202 might_fault(); \
2203- __get_user_check(x,p); \
2204+ pax_open_userland(); \
2205+ __e = __get_user_check(x,p); \
2206+ pax_close_userland(); \
2207+ __e; \
2208 })
2209
2210 extern int __put_user_1(void *, unsigned int);
2211@@ -188,8 +220,12 @@ extern int __put_user_8(void *, unsigned long long);
2212
2213 #define put_user(x,p) \
2214 ({ \
2215+ int __e; \
2216 might_fault(); \
2217- __put_user_check(x,p); \
2218+ pax_open_userland(); \
2219+ __e = __put_user_check(x,p); \
2220+ pax_close_userland(); \
2221+ __e; \
2222 })
2223
2224 #else /* CONFIG_MMU */
2225@@ -230,13 +266,17 @@ static inline void set_fs(mm_segment_t fs)
2226 #define __get_user(x,ptr) \
2227 ({ \
2228 long __gu_err = 0; \
2229+ pax_open_userland(); \
2230 __get_user_err((x),(ptr),__gu_err); \
2231+ pax_close_userland(); \
2232 __gu_err; \
2233 })
2234
2235 #define __get_user_error(x,ptr,err) \
2236 ({ \
2237+ pax_open_userland(); \
2238 __get_user_err((x),(ptr),err); \
2239+ pax_close_userland(); \
2240 (void) 0; \
2241 })
2242
2243@@ -312,13 +352,17 @@ do { \
2244 #define __put_user(x,ptr) \
2245 ({ \
2246 long __pu_err = 0; \
2247+ pax_open_userland(); \
2248 __put_user_err((x),(ptr),__pu_err); \
2249+ pax_close_userland(); \
2250 __pu_err; \
2251 })
2252
2253 #define __put_user_error(x,ptr,err) \
2254 ({ \
2255+ pax_open_userland(); \
2256 __put_user_err((x),(ptr),err); \
2257+ pax_close_userland(); \
2258 (void) 0; \
2259 })
2260
2261@@ -418,11 +462,44 @@ do { \
2262
2263
2264 #ifdef CONFIG_MMU
2265-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2266-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2267+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2268+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2269+
2270+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2271+{
2272+ unsigned long ret;
2273+
2274+ check_object_size(to, n, false);
2275+ pax_open_userland();
2276+ ret = ___copy_from_user(to, from, n);
2277+ pax_close_userland();
2278+ return ret;
2279+}
2280+
2281+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2282+{
2283+ unsigned long ret;
2284+
2285+ check_object_size(from, n, true);
2286+ pax_open_userland();
2287+ ret = ___copy_to_user(to, from, n);
2288+ pax_close_userland();
2289+ return ret;
2290+}
2291+
2292 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2293-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2294+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2295 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2296+
2297+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2298+{
2299+ unsigned long ret;
2300+ pax_open_userland();
2301+ ret = ___clear_user(addr, n);
2302+ pax_close_userland();
2303+ return ret;
2304+}
2305+
2306 #else
2307 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2308 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2309@@ -431,6 +508,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2310
2311 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2312 {
2313+ if ((long)n < 0)
2314+ return n;
2315+
2316 if (access_ok(VERIFY_READ, from, n))
2317 n = __copy_from_user(to, from, n);
2318 else /* security hole - plug it */
2319@@ -440,6 +520,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2320
2321 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2322 {
2323+ if ((long)n < 0)
2324+ return n;
2325+
2326 if (access_ok(VERIFY_WRITE, to, n))
2327 n = __copy_to_user(to, from, n);
2328 return n;
2329diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2330index 96ee092..37f1844 100644
2331--- a/arch/arm/include/uapi/asm/ptrace.h
2332+++ b/arch/arm/include/uapi/asm/ptrace.h
2333@@ -73,7 +73,7 @@
2334 * ARMv7 groups of PSR bits
2335 */
2336 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2337-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2338+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2339 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2340 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2341
2342diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2343index 60d3b73..e5a0f22 100644
2344--- a/arch/arm/kernel/armksyms.c
2345+++ b/arch/arm/kernel/armksyms.c
2346@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2347
2348 /* networking */
2349 EXPORT_SYMBOL(csum_partial);
2350-EXPORT_SYMBOL(csum_partial_copy_from_user);
2351+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2352 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2353 EXPORT_SYMBOL(__csum_ipv6_magic);
2354
2355@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2356 #ifdef CONFIG_MMU
2357 EXPORT_SYMBOL(copy_page);
2358
2359-EXPORT_SYMBOL(__copy_from_user);
2360-EXPORT_SYMBOL(__copy_to_user);
2361-EXPORT_SYMBOL(__clear_user);
2362+EXPORT_SYMBOL(___copy_from_user);
2363+EXPORT_SYMBOL(___copy_to_user);
2364+EXPORT_SYMBOL(___clear_user);
2365
2366 EXPORT_SYMBOL(__get_user_1);
2367 EXPORT_SYMBOL(__get_user_2);
2368diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2369index d43c7e5..257c050 100644
2370--- a/arch/arm/kernel/entry-armv.S
2371+++ b/arch/arm/kernel/entry-armv.S
2372@@ -47,6 +47,87 @@
2373 9997:
2374 .endm
2375
2376+ .macro pax_enter_kernel
2377+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2378+ @ make aligned space for saved DACR
2379+ sub sp, sp, #8
2380+ @ save regs
2381+ stmdb sp!, {r1, r2}
2382+ @ read DACR from cpu_domain into r1
2383+ mov r2, sp
2384+ @ assume 8K pages, since we have to split the immediate in two
2385+ bic r2, r2, #(0x1fc0)
2386+ bic r2, r2, #(0x3f)
2387+ ldr r1, [r2, #TI_CPU_DOMAIN]
2388+ @ store old DACR on stack
2389+ str r1, [sp, #8]
2390+#ifdef CONFIG_PAX_KERNEXEC
2391+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2392+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2393+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2394+#endif
2395+#ifdef CONFIG_PAX_MEMORY_UDEREF
2396+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2397+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2398+#endif
2399+ @ write r1 to current_thread_info()->cpu_domain
2400+ str r1, [r2, #TI_CPU_DOMAIN]
2401+ @ write r1 to DACR
2402+ mcr p15, 0, r1, c3, c0, 0
2403+ @ instruction sync
2404+ instr_sync
2405+ @ restore regs
2406+ ldmia sp!, {r1, r2}
2407+#endif
2408+ .endm
2409+
2410+ .macro pax_open_userland
2411+#ifdef CONFIG_PAX_MEMORY_UDEREF
2412+ @ save regs
2413+ stmdb sp!, {r0, r1}
2414+ @ read DACR from cpu_domain into r1
2415+ mov r0, sp
2416+ @ assume 8K pages, since we have to split the immediate in two
2417+ bic r0, r0, #(0x1fc0)
2418+ bic r0, r0, #(0x3f)
2419+ ldr r1, [r0, #TI_CPU_DOMAIN]
2420+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2421+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2422+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2423+ @ write r1 to current_thread_info()->cpu_domain
2424+ str r1, [r0, #TI_CPU_DOMAIN]
2425+ @ write r1 to DACR
2426+ mcr p15, 0, r1, c3, c0, 0
2427+ @ instruction sync
2428+ instr_sync
2429+ @ restore regs
2430+ ldmia sp!, {r0, r1}
2431+#endif
2432+ .endm
2433+
2434+ .macro pax_close_userland
2435+#ifdef CONFIG_PAX_MEMORY_UDEREF
2436+ @ save regs
2437+ stmdb sp!, {r0, r1}
2438+ @ read DACR from cpu_domain into r1
2439+ mov r0, sp
2440+ @ assume 8K pages, since we have to split the immediate in two
2441+ bic r0, r0, #(0x1fc0)
2442+ bic r0, r0, #(0x3f)
2443+ ldr r1, [r0, #TI_CPU_DOMAIN]
2444+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2445+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2446+ @ write r1 to current_thread_info()->cpu_domain
2447+ str r1, [r0, #TI_CPU_DOMAIN]
2448+ @ write r1 to DACR
2449+ mcr p15, 0, r1, c3, c0, 0
2450+ @ instruction sync
2451+ instr_sync
2452+ @ restore regs
2453+ ldmia sp!, {r0, r1}
2454+#endif
2455+ .endm
2456+
2457 .macro pabt_helper
2458 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2459 #ifdef MULTI_PABORT
2460@@ -89,11 +170,15 @@
2461 * Invalid mode handlers
2462 */
2463 .macro inv_entry, reason
2464+
2465+ pax_enter_kernel
2466+
2467 sub sp, sp, #S_FRAME_SIZE
2468 ARM( stmib sp, {r1 - lr} )
2469 THUMB( stmia sp, {r0 - r12} )
2470 THUMB( str sp, [sp, #S_SP] )
2471 THUMB( str lr, [sp, #S_LR] )
2472+
2473 mov r1, #\reason
2474 .endm
2475
2476@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2477 .macro svc_entry, stack_hole=0
2478 UNWIND(.fnstart )
2479 UNWIND(.save {r0 - pc} )
2480+
2481+ pax_enter_kernel
2482+
2483 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2484+
2485 #ifdef CONFIG_THUMB2_KERNEL
2486 SPFIX( str r0, [sp] ) @ temporarily saved
2487 SPFIX( mov r0, sp )
2488@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2489 ldmia r0, {r3 - r5}
2490 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2491 mov r6, #-1 @ "" "" "" ""
2492+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2493+ @ offset sp by 8 as done in pax_enter_kernel
2494+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2495+#else
2496 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2497+#endif
2498 SPFIX( addeq r2, r2, #4 )
2499 str r3, [sp, #-4]! @ save the "real" r0 copied
2500 @ from the exception stack
2501@@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
2502 .macro usr_entry
2503 UNWIND(.fnstart )
2504 UNWIND(.cantunwind ) @ don't unwind the user space
2505+
2506+ pax_enter_kernel_user
2507+
2508 sub sp, sp, #S_FRAME_SIZE
2509 ARM( stmib sp, {r1 - r12} )
2510 THUMB( stmia sp, {r0 - r12} )
2511@@ -357,7 +454,8 @@ ENDPROC(__pabt_svc)
2512 .endm
2513
2514 .macro kuser_cmpxchg_check
2515-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2516+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
2517+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2518 #ifndef CONFIG_MMU
2519 #warning "NPTL on non MMU needs fixing"
2520 #else
2521@@ -414,7 +512,9 @@ __und_usr:
2522 tst r3, #PSR_T_BIT @ Thumb mode?
2523 bne __und_usr_thumb
2524 sub r4, r2, #4 @ ARM instr at LR - 4
2525+ pax_open_userland
2526 1: ldrt r0, [r4]
2527+ pax_close_userland
2528 #ifdef CONFIG_CPU_ENDIAN_BE8
2529 rev r0, r0 @ little endian instruction
2530 #endif
2531@@ -449,10 +549,14 @@ __und_usr_thumb:
2532 */
2533 .arch armv6t2
2534 #endif
2535+ pax_open_userland
2536 2: ldrht r5, [r4]
2537+ pax_close_userland
2538 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2539 blo __und_usr_fault_16 @ 16bit undefined instruction
2540+ pax_open_userland
2541 3: ldrht r0, [r2]
2542+ pax_close_userland
2543 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2544 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2545 orr r0, r0, r5, lsl #16
2546@@ -481,7 +585,8 @@ ENDPROC(__und_usr)
2547 */
2548 .pushsection .fixup, "ax"
2549 .align 2
2550-4: mov pc, r9
2551+4: pax_close_userland
2552+ mov pc, r9
2553 .popsection
2554 .pushsection __ex_table,"a"
2555 .long 1b, 4b
2556@@ -690,7 +795,7 @@ ENTRY(__switch_to)
2557 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2558 THUMB( str sp, [ip], #4 )
2559 THUMB( str lr, [ip], #4 )
2560-#ifdef CONFIG_CPU_USE_DOMAINS
2561+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2562 ldr r6, [r2, #TI_CPU_DOMAIN]
2563 #endif
2564 set_tls r3, r4, r5
2565@@ -699,7 +804,7 @@ ENTRY(__switch_to)
2566 ldr r8, =__stack_chk_guard
2567 ldr r7, [r7, #TSK_STACK_CANARY]
2568 #endif
2569-#ifdef CONFIG_CPU_USE_DOMAINS
2570+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2571 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2572 #endif
2573 mov r5, r0
2574diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2575index bc5bc0a..d0998ca 100644
2576--- a/arch/arm/kernel/entry-common.S
2577+++ b/arch/arm/kernel/entry-common.S
2578@@ -10,18 +10,46 @@
2579
2580 #include <asm/unistd.h>
2581 #include <asm/ftrace.h>
2582+#include <asm/domain.h>
2583 #include <asm/unwind.h>
2584
2585+#include "entry-header.S"
2586+
2587 #ifdef CONFIG_NEED_RET_TO_USER
2588 #include <mach/entry-macro.S>
2589 #else
2590 .macro arch_ret_to_user, tmp1, tmp2
2591+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2592+ @ save regs
2593+ stmdb sp!, {r1, r2}
2594+ @ read DACR from cpu_domain into r1
2595+ mov r2, sp
2596+ @ assume 8K pages, since we have to split the immediate in two
2597+ bic r2, r2, #(0x1fc0)
2598+ bic r2, r2, #(0x3f)
2599+ ldr r1, [r2, #TI_CPU_DOMAIN]
2600+#ifdef CONFIG_PAX_KERNEXEC
2601+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2602+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2603+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2604+#endif
2605+#ifdef CONFIG_PAX_MEMORY_UDEREF
2606+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2607+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2608+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2609+#endif
2610+ @ write r1 to current_thread_info()->cpu_domain
2611+ str r1, [r2, #TI_CPU_DOMAIN]
2612+ @ write r1 to DACR
2613+ mcr p15, 0, r1, c3, c0, 0
2614+ @ instruction sync
2615+ instr_sync
2616+ @ restore regs
2617+ ldmia sp!, {r1, r2}
2618+#endif
2619 .endm
2620 #endif
2621
2622-#include "entry-header.S"
2623-
2624-
2625 .align 5
2626 /*
2627 * This is the fast syscall return path. We do as little as
2628@@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
2629
2630 .align 5
2631 ENTRY(vector_swi)
2632+
2633 sub sp, sp, #S_FRAME_SIZE
2634 stmia sp, {r0 - r12} @ Calling r0 - r12
2635 ARM( add r8, sp, #S_PC )
2636@@ -399,6 +428,12 @@ ENTRY(vector_swi)
2637 ldr scno, [lr, #-4] @ get SWI instruction
2638 #endif
2639
2640+ /*
2641+ * do this here to avoid a performance hit of wrapping the code above
2642+ * that directly dereferences userland to parse the SWI instruction
2643+ */
2644+ pax_enter_kernel_user
2645+
2646 #ifdef CONFIG_ALIGNMENT_TRAP
2647 ldr ip, __cr_alignment
2648 ldr ip, [ip]
2649diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2650index 160f337..db67ee4 100644
2651--- a/arch/arm/kernel/entry-header.S
2652+++ b/arch/arm/kernel/entry-header.S
2653@@ -73,6 +73,60 @@
2654 msr cpsr_c, \rtemp @ switch back to the SVC mode
2655 .endm
2656
2657+ .macro pax_enter_kernel_user
2658+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2659+ @ save regs
2660+ stmdb sp!, {r0, r1}
2661+ @ read DACR from cpu_domain into r1
2662+ mov r0, sp
2663+ @ assume 8K pages, since we have to split the immediate in two
2664+ bic r0, r0, #(0x1fc0)
2665+ bic r0, r0, #(0x3f)
2666+ ldr r1, [r0, #TI_CPU_DOMAIN]
2667+#ifdef CONFIG_PAX_MEMORY_UDEREF
2668+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2669+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2670+#endif
2671+#ifdef CONFIG_PAX_KERNEXEC
2672+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2673+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2674+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2675+#endif
2676+ @ write r1 to current_thread_info()->cpu_domain
2677+ str r1, [r0, #TI_CPU_DOMAIN]
2678+ @ write r1 to DACR
2679+ mcr p15, 0, r1, c3, c0, 0
2680+ @ instruction sync
2681+ instr_sync
2682+ @ restore regs
2683+ ldmia sp!, {r0, r1}
2684+#endif
2685+ .endm
2686+
2687+ .macro pax_exit_kernel
2688+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2689+ @ save regs
2690+ stmdb sp!, {r0, r1}
2691+ @ read old DACR from stack into r1
2692+ ldr r1, [sp, #(8 + S_SP)]
2693+ sub r1, r1, #8
2694+ ldr r1, [r1]
2695+
2696+ @ write r1 to current_thread_info()->cpu_domain
2697+ mov r0, sp
2698+ @ assume 8K pages, since we have to split the immediate in two
2699+ bic r0, r0, #(0x1fc0)
2700+ bic r0, r0, #(0x3f)
2701+ str r1, [r0, #TI_CPU_DOMAIN]
2702+ @ write r1 to DACR
2703+ mcr p15, 0, r1, c3, c0, 0
2704+ @ instruction sync
2705+ instr_sync
2706+ @ restore regs
2707+ ldmia sp!, {r0, r1}
2708+#endif
2709+ .endm
2710+
2711 #ifndef CONFIG_THUMB2_KERNEL
2712 .macro svc_exit, rpsr, irq = 0
2713 .if \irq != 0
2714@@ -92,6 +146,9 @@
2715 blne trace_hardirqs_off
2716 #endif
2717 .endif
2718+
2719+ pax_exit_kernel
2720+
2721 msr spsr_cxsf, \rpsr
2722 #if defined(CONFIG_CPU_V6)
2723 ldr r0, [sp]
2724@@ -155,6 +212,9 @@
2725 blne trace_hardirqs_off
2726 #endif
2727 .endif
2728+
2729+ pax_exit_kernel
2730+
2731 ldr lr, [sp, #S_SP] @ top of the stack
2732 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2733 clrex @ clear the exclusive monitor
2734diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2735index 25442f4..d4948fc 100644
2736--- a/arch/arm/kernel/fiq.c
2737+++ b/arch/arm/kernel/fiq.c
2738@@ -84,17 +84,16 @@ int show_fiq_list(struct seq_file *p, int prec)
2739
2740 void set_fiq_handler(void *start, unsigned int length)
2741 {
2742-#if defined(CONFIG_CPU_USE_DOMAINS)
2743- void *base = (void *)0xffff0000;
2744-#else
2745 void *base = vectors_page;
2746-#endif
2747 unsigned offset = FIQ_OFFSET;
2748
2749+ pax_open_kernel();
2750 memcpy(base + offset, start, length);
2751+ pax_close_kernel();
2752+
2753+ if (!cache_is_vipt_nonaliasing())
2754+ flush_icache_range(base + offset, offset + length);
2755 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
2756- if (!vectors_high())
2757- flush_icache_range(offset, offset + length);
2758 }
2759
2760 int claim_fiq(struct fiq_handler *f)
2761diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2762index 8bac553..caee108 100644
2763--- a/arch/arm/kernel/head.S
2764+++ b/arch/arm/kernel/head.S
2765@@ -52,7 +52,9 @@
2766 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2767
2768 .macro pgtbl, rd, phys
2769- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2770+ mov \rd, #TEXT_OFFSET
2771+ sub \rd, #PG_DIR_SIZE
2772+ add \rd, \rd, \phys
2773 .endm
2774
2775 /*
2776@@ -434,7 +436,7 @@ __enable_mmu:
2777 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2778 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2779 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2780- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2781+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2782 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2783 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2784 #endif
2785diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2786index 1fd749e..47adb08 100644
2787--- a/arch/arm/kernel/hw_breakpoint.c
2788+++ b/arch/arm/kernel/hw_breakpoint.c
2789@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2790 return NOTIFY_OK;
2791 }
2792
2793-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2794+static struct notifier_block dbg_reset_nb = {
2795 .notifier_call = dbg_reset_notify,
2796 };
2797
2798diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2799index 1e9be5d..03edbc2 100644
2800--- a/arch/arm/kernel/module.c
2801+++ b/arch/arm/kernel/module.c
2802@@ -37,12 +37,37 @@
2803 #endif
2804
2805 #ifdef CONFIG_MMU
2806-void *module_alloc(unsigned long size)
2807+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2808 {
2809+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2810+ return NULL;
2811 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2812- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2813+ GFP_KERNEL, prot, -1,
2814 __builtin_return_address(0));
2815 }
2816+
2817+void *module_alloc(unsigned long size)
2818+{
2819+
2820+#ifdef CONFIG_PAX_KERNEXEC
2821+ return __module_alloc(size, PAGE_KERNEL);
2822+#else
2823+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2824+#endif
2825+
2826+}
2827+
2828+#ifdef CONFIG_PAX_KERNEXEC
2829+void module_free_exec(struct module *mod, void *module_region)
2830+{
2831+ module_free(mod, module_region);
2832+}
2833+
2834+void *module_alloc_exec(unsigned long size)
2835+{
2836+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2837+}
2838+#endif
2839 #endif
2840
2841 int
2842diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2843index 07314af..c46655c 100644
2844--- a/arch/arm/kernel/patch.c
2845+++ b/arch/arm/kernel/patch.c
2846@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2847 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2848 int size;
2849
2850+ pax_open_kernel();
2851 if (thumb2 && __opcode_is_thumb16(insn)) {
2852 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2853 size = sizeof(u16);
2854@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2855 *(u32 *)addr = insn;
2856 size = sizeof(u32);
2857 }
2858+ pax_close_kernel();
2859
2860 flush_icache_range((uintptr_t)(addr),
2861 (uintptr_t)(addr) + size);
2862diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
2863index e19edc6..e186ee1 100644
2864--- a/arch/arm/kernel/perf_event.c
2865+++ b/arch/arm/kernel/perf_event.c
2866@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
2867 int mapping;
2868
2869 if (config >= PERF_COUNT_HW_MAX)
2870- return -ENOENT;
2871+ return -EINVAL;
2872
2873 mapping = (*event_map)[config];
2874 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
2875diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2876index 1f2740e..b36e225 100644
2877--- a/arch/arm/kernel/perf_event_cpu.c
2878+++ b/arch/arm/kernel/perf_event_cpu.c
2879@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2880 return NOTIFY_OK;
2881 }
2882
2883-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2884+static struct notifier_block cpu_pmu_hotplug_notifier = {
2885 .notifier_call = cpu_pmu_notify,
2886 };
2887
2888diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2889index 5bc2615..dcd439f 100644
2890--- a/arch/arm/kernel/process.c
2891+++ b/arch/arm/kernel/process.c
2892@@ -223,6 +223,7 @@ void machine_power_off(void)
2893
2894 if (pm_power_off)
2895 pm_power_off();
2896+ BUG();
2897 }
2898
2899 /*
2900@@ -236,7 +237,7 @@ void machine_power_off(void)
2901 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2902 * to use. Implementing such co-ordination would be essentially impossible.
2903 */
2904-void machine_restart(char *cmd)
2905+__noreturn void machine_restart(char *cmd)
2906 {
2907 smp_send_stop();
2908
2909@@ -258,8 +259,8 @@ void __show_regs(struct pt_regs *regs)
2910
2911 show_regs_print_info(KERN_DEFAULT);
2912
2913- print_symbol("PC is at %s\n", instruction_pointer(regs));
2914- print_symbol("LR is at %s\n", regs->ARM_lr);
2915+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2916+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2917 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2918 "sp : %08lx ip : %08lx fp : %08lx\n",
2919 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2920@@ -426,12 +427,6 @@ unsigned long get_wchan(struct task_struct *p)
2921 return 0;
2922 }
2923
2924-unsigned long arch_randomize_brk(struct mm_struct *mm)
2925-{
2926- unsigned long range_end = mm->brk + 0x02000000;
2927- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2928-}
2929-
2930 #ifdef CONFIG_MMU
2931 #ifdef CONFIG_KUSER_HELPERS
2932 /*
2933@@ -447,7 +442,7 @@ static struct vm_area_struct gate_vma = {
2934
2935 static int __init gate_vma_init(void)
2936 {
2937- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2938+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2939 return 0;
2940 }
2941 arch_initcall(gate_vma_init);
2942@@ -466,48 +461,23 @@ int in_gate_area_no_mm(unsigned long addr)
2943 {
2944 return in_gate_area(NULL, addr);
2945 }
2946-#define is_gate_vma(vma) ((vma) = &gate_vma)
2947+#define is_gate_vma(vma) ((vma) == &gate_vma)
2948 #else
2949 #define is_gate_vma(vma) 0
2950 #endif
2951
2952 const char *arch_vma_name(struct vm_area_struct *vma)
2953 {
2954- return is_gate_vma(vma) ? "[vectors]" :
2955- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2956- "[sigpage]" : NULL;
2957+ return is_gate_vma(vma) ? "[vectors]" : NULL;
2958 }
2959
2960-static struct page *signal_page;
2961-extern struct page *get_signal_page(void);
2962-
2963 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2964 {
2965 struct mm_struct *mm = current->mm;
2966- unsigned long addr;
2967- int ret;
2968-
2969- if (!signal_page)
2970- signal_page = get_signal_page();
2971- if (!signal_page)
2972- return -ENOMEM;
2973
2974 down_write(&mm->mmap_sem);
2975- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2976- if (IS_ERR_VALUE(addr)) {
2977- ret = addr;
2978- goto up_fail;
2979- }
2980-
2981- ret = install_special_mapping(mm, addr, PAGE_SIZE,
2982- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2983- &signal_page);
2984-
2985- if (ret == 0)
2986- mm->context.sigpage = addr;
2987-
2988- up_fail:
2989+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2990 up_write(&mm->mmap_sem);
2991- return ret;
2992+ return 0;
2993 }
2994 #endif
2995diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2996index 3653164..d83e55d 100644
2997--- a/arch/arm/kernel/psci.c
2998+++ b/arch/arm/kernel/psci.c
2999@@ -24,7 +24,7 @@
3000 #include <asm/opcodes-virt.h>
3001 #include <asm/psci.h>
3002
3003-struct psci_operations psci_ops;
3004+struct psci_operations psci_ops __read_only;
3005
3006 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3007
3008diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3009index 03deeff..741ce88 100644
3010--- a/arch/arm/kernel/ptrace.c
3011+++ b/arch/arm/kernel/ptrace.c
3012@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
3013 return current_thread_info()->syscall;
3014 }
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+extern void gr_delayed_cred_worker(void);
3018+#endif
3019+
3020 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3021 {
3022 current_thread_info()->syscall = scno;
3023
3024+#ifdef CONFIG_GRKERNSEC_SETXID
3025+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3026+ gr_delayed_cred_worker();
3027+#endif
3028+
3029 /* Do the secure computing check first; failures should be fast. */
3030 if (secure_computing(scno) == -1)
3031 return -1;
3032diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3033index b4b1d39..efdc9be 100644
3034--- a/arch/arm/kernel/setup.c
3035+++ b/arch/arm/kernel/setup.c
3036@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3037 unsigned int elf_hwcap __read_mostly;
3038 EXPORT_SYMBOL(elf_hwcap);
3039
3040+pteval_t __supported_pte_mask __read_only;
3041+pmdval_t __supported_pmd_mask __read_only;
3042
3043 #ifdef MULTI_CPU
3044-struct processor processor __read_mostly;
3045+struct processor processor;
3046 #endif
3047 #ifdef MULTI_TLB
3048-struct cpu_tlb_fns cpu_tlb __read_mostly;
3049+struct cpu_tlb_fns cpu_tlb __read_only;
3050 #endif
3051 #ifdef MULTI_USER
3052-struct cpu_user_fns cpu_user __read_mostly;
3053+struct cpu_user_fns cpu_user __read_only;
3054 #endif
3055 #ifdef MULTI_CACHE
3056-struct cpu_cache_fns cpu_cache __read_mostly;
3057+struct cpu_cache_fns cpu_cache __read_only;
3058 #endif
3059 #ifdef CONFIG_OUTER_CACHE
3060-struct outer_cache_fns outer_cache __read_mostly;
3061+struct outer_cache_fns outer_cache __read_only;
3062 EXPORT_SYMBOL(outer_cache);
3063 #endif
3064
3065@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3066 asm("mrc p15, 0, %0, c0, c1, 4"
3067 : "=r" (mmfr0));
3068 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3069- (mmfr0 & 0x000000f0) >= 0x00000030)
3070+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3071 cpu_arch = CPU_ARCH_ARMv7;
3072- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3073+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3074+ __supported_pte_mask |= L_PTE_PXN;
3075+ __supported_pmd_mask |= PMD_PXNTABLE;
3076+ }
3077+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3078 (mmfr0 & 0x000000f0) == 0x00000020)
3079 cpu_arch = CPU_ARCH_ARMv6;
3080 else
3081@@ -479,7 +485,7 @@ static void __init setup_processor(void)
3082 __cpu_architecture = __get_cpu_architecture();
3083
3084 #ifdef MULTI_CPU
3085- processor = *list->proc;
3086+ memcpy((void *)&processor, list->proc, sizeof processor);
3087 #endif
3088 #ifdef MULTI_TLB
3089 cpu_tlb = *list->tlb;
3090diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3091index 5a42c12..a2bb7c6 100644
3092--- a/arch/arm/kernel/signal.c
3093+++ b/arch/arm/kernel/signal.c
3094@@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
3095 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
3096 };
3097
3098-static unsigned long signal_return_offset;
3099-
3100 #ifdef CONFIG_CRUNCH
3101 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3102 {
3103@@ -406,8 +404,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3104 * except when the MPU has protected the vectors
3105 * page from PL0
3106 */
3107- retcode = mm->context.sigpage + signal_return_offset +
3108- (idx << 2) + thumb;
3109+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3110 } else
3111 #endif
3112 {
3113@@ -611,33 +608,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3114 } while (thread_flags & _TIF_WORK_MASK);
3115 return 0;
3116 }
3117-
3118-struct page *get_signal_page(void)
3119-{
3120- unsigned long ptr;
3121- unsigned offset;
3122- struct page *page;
3123- void *addr;
3124-
3125- page = alloc_pages(GFP_KERNEL, 0);
3126-
3127- if (!page)
3128- return NULL;
3129-
3130- addr = page_address(page);
3131-
3132- /* Give the signal return code some randomness */
3133- offset = 0x200 + (get_random_int() & 0x7fc);
3134- signal_return_offset = offset;
3135-
3136- /*
3137- * Copy signal return handlers into the vector page, and
3138- * set sigreturn to be a pointer to these.
3139- */
3140- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3141-
3142- ptr = (unsigned long)addr + offset;
3143- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3144-
3145- return page;
3146-}
3147diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3148index 5919eb4..b5d6dfe 100644
3149--- a/arch/arm/kernel/smp.c
3150+++ b/arch/arm/kernel/smp.c
3151@@ -70,7 +70,7 @@ enum ipi_msg_type {
3152
3153 static DECLARE_COMPLETION(cpu_running);
3154
3155-static struct smp_operations smp_ops;
3156+static struct smp_operations smp_ops __read_only;
3157
3158 void __init smp_set_ops(struct smp_operations *ops)
3159 {
3160diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3161index 6b9567e..b8af2d6 100644
3162--- a/arch/arm/kernel/traps.c
3163+++ b/arch/arm/kernel/traps.c
3164@@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3165 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3166 {
3167 #ifdef CONFIG_KALLSYMS
3168- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3169+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3170 #else
3171 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3172 #endif
3173@@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3174 static int die_owner = -1;
3175 static unsigned int die_nest_count;
3176
3177+extern void gr_handle_kernel_exploit(void);
3178+
3179 static unsigned long oops_begin(void)
3180 {
3181 int cpu;
3182@@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3183 panic("Fatal exception in interrupt");
3184 if (panic_on_oops)
3185 panic("Fatal exception");
3186+
3187+ gr_handle_kernel_exploit();
3188+
3189 if (signr)
3190 do_exit(signr);
3191 }
3192@@ -592,7 +597,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3193 * The user helper at 0xffff0fe0 must be used instead.
3194 * (see entry-armv.S for details)
3195 */
3196+ pax_open_kernel();
3197 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3198+ pax_close_kernel();
3199 }
3200 return 0;
3201
3202@@ -848,5 +855,9 @@ void __init early_trap_init(void *vectors_base)
3203 kuser_init(vectors_base);
3204
3205 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3206- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3207+
3208+#ifndef CONFIG_PAX_MEMORY_UDEREF
3209+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3210+#endif
3211+
3212 }
3213diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3214index 33f2ea3..0b91824 100644
3215--- a/arch/arm/kernel/vmlinux.lds.S
3216+++ b/arch/arm/kernel/vmlinux.lds.S
3217@@ -8,7 +8,11 @@
3218 #include <asm/thread_info.h>
3219 #include <asm/memory.h>
3220 #include <asm/page.h>
3221-
3222+
3223+#ifdef CONFIG_PAX_KERNEXEC
3224+#include <asm/pgtable.h>
3225+#endif
3226+
3227 #define PROC_INFO \
3228 . = ALIGN(4); \
3229 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3230@@ -94,6 +98,11 @@ SECTIONS
3231 _text = .;
3232 HEAD_TEXT
3233 }
3234+
3235+#ifdef CONFIG_PAX_KERNEXEC
3236+ . = ALIGN(1<<SECTION_SHIFT);
3237+#endif
3238+
3239 .text : { /* Real text segment */
3240 _stext = .; /* Text and read-only data */
3241 __exception_text_start = .;
3242@@ -116,6 +125,8 @@ SECTIONS
3243 ARM_CPU_KEEP(PROC_INFO)
3244 }
3245
3246+ _etext = .; /* End of text section */
3247+
3248 RO_DATA(PAGE_SIZE)
3249
3250 . = ALIGN(4);
3251@@ -146,7 +157,9 @@ SECTIONS
3252
3253 NOTES
3254
3255- _etext = .; /* End of text and rodata section */
3256+#ifdef CONFIG_PAX_KERNEXEC
3257+ . = ALIGN(1<<SECTION_SHIFT);
3258+#endif
3259
3260 #ifndef CONFIG_XIP_KERNEL
3261 . = ALIGN(PAGE_SIZE);
3262@@ -224,6 +237,11 @@ SECTIONS
3263 . = PAGE_OFFSET + TEXT_OFFSET;
3264 #else
3265 __init_end = .;
3266+
3267+#ifdef CONFIG_PAX_KERNEXEC
3268+ . = ALIGN(1<<SECTION_SHIFT);
3269+#endif
3270+
3271 . = ALIGN(THREAD_SIZE);
3272 __data_loc = .;
3273 #endif
3274diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3275index 14a0d98..7771a7d 100644
3276--- a/arch/arm/lib/clear_user.S
3277+++ b/arch/arm/lib/clear_user.S
3278@@ -12,14 +12,14 @@
3279
3280 .text
3281
3282-/* Prototype: int __clear_user(void *addr, size_t sz)
3283+/* Prototype: int ___clear_user(void *addr, size_t sz)
3284 * Purpose : clear some user memory
3285 * Params : addr - user memory address to clear
3286 * : sz - number of bytes to clear
3287 * Returns : number of bytes NOT cleared
3288 */
3289 ENTRY(__clear_user_std)
3290-WEAK(__clear_user)
3291+WEAK(___clear_user)
3292 stmfd sp!, {r1, lr}
3293 mov r2, #0
3294 cmp r1, #4
3295@@ -44,7 +44,7 @@ WEAK(__clear_user)
3296 USER( strnebt r2, [r0])
3297 mov r0, #0
3298 ldmfd sp!, {r1, pc}
3299-ENDPROC(__clear_user)
3300+ENDPROC(___clear_user)
3301 ENDPROC(__clear_user_std)
3302
3303 .pushsection .fixup,"ax"
3304diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3305index 66a477a..bee61d3 100644
3306--- a/arch/arm/lib/copy_from_user.S
3307+++ b/arch/arm/lib/copy_from_user.S
3308@@ -16,7 +16,7 @@
3309 /*
3310 * Prototype:
3311 *
3312- * size_t __copy_from_user(void *to, const void *from, size_t n)
3313+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3314 *
3315 * Purpose:
3316 *
3317@@ -84,11 +84,11 @@
3318
3319 .text
3320
3321-ENTRY(__copy_from_user)
3322+ENTRY(___copy_from_user)
3323
3324 #include "copy_template.S"
3325
3326-ENDPROC(__copy_from_user)
3327+ENDPROC(___copy_from_user)
3328
3329 .pushsection .fixup,"ax"
3330 .align 0
3331diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3332index 6ee2f67..d1cce76 100644
3333--- a/arch/arm/lib/copy_page.S
3334+++ b/arch/arm/lib/copy_page.S
3335@@ -10,6 +10,7 @@
3336 * ASM optimised string functions
3337 */
3338 #include <linux/linkage.h>
3339+#include <linux/const.h>
3340 #include <asm/assembler.h>
3341 #include <asm/asm-offsets.h>
3342 #include <asm/cache.h>
3343diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3344index d066df6..df28194 100644
3345--- a/arch/arm/lib/copy_to_user.S
3346+++ b/arch/arm/lib/copy_to_user.S
3347@@ -16,7 +16,7 @@
3348 /*
3349 * Prototype:
3350 *
3351- * size_t __copy_to_user(void *to, const void *from, size_t n)
3352+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3353 *
3354 * Purpose:
3355 *
3356@@ -88,11 +88,11 @@
3357 .text
3358
3359 ENTRY(__copy_to_user_std)
3360-WEAK(__copy_to_user)
3361+WEAK(___copy_to_user)
3362
3363 #include "copy_template.S"
3364
3365-ENDPROC(__copy_to_user)
3366+ENDPROC(___copy_to_user)
3367 ENDPROC(__copy_to_user_std)
3368
3369 .pushsection .fixup,"ax"
3370diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3371index 7d08b43..f7ca7ea 100644
3372--- a/arch/arm/lib/csumpartialcopyuser.S
3373+++ b/arch/arm/lib/csumpartialcopyuser.S
3374@@ -57,8 +57,8 @@
3375 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3376 */
3377
3378-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3379-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3380+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3381+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3382
3383 #include "csumpartialcopygeneric.S"
3384
3385diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3386index 64dbfa5..84a3fd9 100644
3387--- a/arch/arm/lib/delay.c
3388+++ b/arch/arm/lib/delay.c
3389@@ -28,7 +28,7 @@
3390 /*
3391 * Default to the loop-based delay implementation.
3392 */
3393-struct arm_delay_ops arm_delay_ops = {
3394+struct arm_delay_ops arm_delay_ops __read_only = {
3395 .delay = __loop_delay,
3396 .const_udelay = __loop_const_udelay,
3397 .udelay = __loop_udelay,
3398diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3399index 025f742..8432b08 100644
3400--- a/arch/arm/lib/uaccess_with_memcpy.c
3401+++ b/arch/arm/lib/uaccess_with_memcpy.c
3402@@ -104,7 +104,7 @@ out:
3403 }
3404
3405 unsigned long
3406-__copy_to_user(void __user *to, const void *from, unsigned long n)
3407+___copy_to_user(void __user *to, const void *from, unsigned long n)
3408 {
3409 /*
3410 * This test is stubbed out of the main function above to keep
3411diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3412index f389228..592ef66 100644
3413--- a/arch/arm/mach-kirkwood/common.c
3414+++ b/arch/arm/mach-kirkwood/common.c
3415@@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3416 clk_gate_ops.disable(hw);
3417 }
3418
3419-static struct clk_ops clk_gate_fn_ops;
3420+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3421+{
3422+ return clk_gate_ops.is_enabled(hw);
3423+}
3424+
3425+static struct clk_ops clk_gate_fn_ops = {
3426+ .enable = clk_gate_fn_enable,
3427+ .disable = clk_gate_fn_disable,
3428+ .is_enabled = clk_gate_fn_is_enabled,
3429+};
3430
3431 static struct clk __init *clk_register_gate_fn(struct device *dev,
3432 const char *name,
3433@@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3434 gate_fn->fn_en = fn_en;
3435 gate_fn->fn_dis = fn_dis;
3436
3437- /* ops is the gate ops, but with our enable/disable functions */
3438- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3439- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3440- clk_gate_fn_ops = clk_gate_ops;
3441- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3442- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3443- }
3444-
3445 clk = clk_register(dev, &gate_fn->gate.hw);
3446
3447 if (IS_ERR(clk))
3448diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3449index f6eeb87..cc90868 100644
3450--- a/arch/arm/mach-omap2/board-n8x0.c
3451+++ b/arch/arm/mach-omap2/board-n8x0.c
3452@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3453 }
3454 #endif
3455
3456-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3457+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3458 .late_init = n8x0_menelaus_late_init,
3459 };
3460
3461diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3462index 6c4da12..d9ca72d 100644
3463--- a/arch/arm/mach-omap2/gpmc.c
3464+++ b/arch/arm/mach-omap2/gpmc.c
3465@@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
3466 };
3467
3468 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3469-static struct irq_chip gpmc_irq_chip;
3470 static unsigned gpmc_irq_start;
3471
3472 static struct resource gpmc_mem_root;
3473@@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3474
3475 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3476
3477+static struct irq_chip gpmc_irq_chip = {
3478+ .name = "gpmc",
3479+ .irq_startup = gpmc_irq_noop_ret,
3480+ .irq_enable = gpmc_irq_enable,
3481+ .irq_disable = gpmc_irq_disable,
3482+ .irq_shutdown = gpmc_irq_noop,
3483+ .irq_ack = gpmc_irq_noop,
3484+ .irq_mask = gpmc_irq_noop,
3485+ .irq_unmask = gpmc_irq_noop,
3486+
3487+};
3488+
3489 static int gpmc_setup_irq(void)
3490 {
3491 int i;
3492@@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
3493 return gpmc_irq_start;
3494 }
3495
3496- gpmc_irq_chip.name = "gpmc";
3497- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3498- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3499- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3500- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3501- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3502- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3503- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3504-
3505 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3506 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3507
3508diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3509index f8bb3b9..831e7b8 100644
3510--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3511+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3512@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3513 return NOTIFY_OK;
3514 }
3515
3516-static struct notifier_block __refdata irq_hotplug_notifier = {
3517+static struct notifier_block irq_hotplug_notifier = {
3518 .notifier_call = irq_cpu_hotplug_notify,
3519 };
3520
3521diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3522index e6d2307..d057195 100644
3523--- a/arch/arm/mach-omap2/omap_device.c
3524+++ b/arch/arm/mach-omap2/omap_device.c
3525@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3526 struct platform_device __init *omap_device_build(const char *pdev_name,
3527 int pdev_id,
3528 struct omap_hwmod *oh,
3529- void *pdata, int pdata_len)
3530+ const void *pdata, int pdata_len)
3531 {
3532 struct omap_hwmod *ohs[] = { oh };
3533
3534@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3535 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3536 int pdev_id,
3537 struct omap_hwmod **ohs,
3538- int oh_cnt, void *pdata,
3539+ int oh_cnt, const void *pdata,
3540 int pdata_len)
3541 {
3542 int ret = -ENOMEM;
3543diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3544index 044c31d..2ee0861 100644
3545--- a/arch/arm/mach-omap2/omap_device.h
3546+++ b/arch/arm/mach-omap2/omap_device.h
3547@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3548 /* Core code interface */
3549
3550 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3551- struct omap_hwmod *oh, void *pdata,
3552+ struct omap_hwmod *oh, const void *pdata,
3553 int pdata_len);
3554
3555 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3556 struct omap_hwmod **oh, int oh_cnt,
3557- void *pdata, int pdata_len);
3558+ const void *pdata, int pdata_len);
3559
3560 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3561 struct omap_hwmod **ohs, int oh_cnt);
3562diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3563index 7341eff..fd75e34 100644
3564--- a/arch/arm/mach-omap2/omap_hwmod.c
3565+++ b/arch/arm/mach-omap2/omap_hwmod.c
3566@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3567 int (*init_clkdm)(struct omap_hwmod *oh);
3568 void (*update_context_lost)(struct omap_hwmod *oh);
3569 int (*get_context_lost)(struct omap_hwmod *oh);
3570-};
3571+} __no_const;
3572
3573 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3574-static struct omap_hwmod_soc_ops soc_ops;
3575+static struct omap_hwmod_soc_ops soc_ops __read_only;
3576
3577 /* omap_hwmod_list contains all registered struct omap_hwmods */
3578 static LIST_HEAD(omap_hwmod_list);
3579diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3580index d15c7bb..b2d1f0c 100644
3581--- a/arch/arm/mach-omap2/wd_timer.c
3582+++ b/arch/arm/mach-omap2/wd_timer.c
3583@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3584 struct omap_hwmod *oh;
3585 char *oh_name = "wd_timer2";
3586 char *dev_name = "omap_wdt";
3587- struct omap_wd_timer_platform_data pdata;
3588+ static struct omap_wd_timer_platform_data pdata = {
3589+ .read_reset_sources = prm_read_reset_sources
3590+ };
3591
3592 if (!cpu_class_is_omap2() || of_have_populated_dt())
3593 return 0;
3594@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3595 return -EINVAL;
3596 }
3597
3598- pdata.read_reset_sources = prm_read_reset_sources;
3599-
3600 pdev = omap_device_build(dev_name, id, oh, &pdata,
3601 sizeof(struct omap_wd_timer_platform_data));
3602 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3603diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3604index 0cdba8d..297993e 100644
3605--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3606+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3607@@ -181,7 +181,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3608 bool entered_lp2 = false;
3609
3610 if (tegra_pending_sgi())
3611- ACCESS_ONCE(abort_flag) = true;
3612+ ACCESS_ONCE_RW(abort_flag) = true;
3613
3614 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3615
3616diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3617index cad3ca86..1d79e0f 100644
3618--- a/arch/arm/mach-ux500/setup.h
3619+++ b/arch/arm/mach-ux500/setup.h
3620@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
3621 .type = MT_DEVICE, \
3622 }
3623
3624-#define __MEM_DEV_DESC(x, sz) { \
3625- .virtual = IO_ADDRESS(x), \
3626- .pfn = __phys_to_pfn(x), \
3627- .length = sz, \
3628- .type = MT_MEMORY, \
3629-}
3630-
3631 extern struct smp_operations ux500_smp_ops;
3632 extern void ux500_cpu_die(unsigned int cpu);
3633
3634diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
e2b79cd1 3635index 08c9fe9..191320c 100644
bb5f0bf8
AF
3636--- a/arch/arm/mm/Kconfig
3637+++ b/arch/arm/mm/Kconfig
3638@@ -436,7 +436,7 @@ config CPU_32v5
3639
3640 config CPU_32v6
3641 bool
3642- select CPU_USE_DOMAINS if CPU_V6 && MMU
3643+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3644 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3645
3646 config CPU_32v6K
3647@@ -585,6 +585,7 @@ config CPU_CP15_MPU
3648
3649 config CPU_USE_DOMAINS
3650 bool
3651+ depends on !ARM_LPAE && !PAX_KERNEXEC
3652 help
3653 This option enables or disables the use of domain switching
3654 via the set_fs() function.
3655@@ -780,6 +781,7 @@ config NEED_KUSER_HELPERS
3656 config KUSER_HELPERS
3657 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3658 default y
3659+ depends on !(CPU_V6 || CPU_V6K || CPU_V7)
3660 help
3661 Warning: disabling this option may break user programs.
3662
e2b79cd1
AF
3663@@ -792,7 +794,7 @@ config KUSER_HELPERS
3664 See Documentation/arm/kernel_user_helpers.txt for details.
bb5f0bf8
AF
3665
3666 However, the fixed address nature of these helpers can be used
3667- by ROP (return orientated programming) authors when creating
3668+ by ROP (Return Oriented Programming) authors when creating
3669 exploits.
3670
3671 If all of the binaries and libraries which run on your platform
3672diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3673index 6f4585b..7b6f52b 100644
3674--- a/arch/arm/mm/alignment.c
3675+++ b/arch/arm/mm/alignment.c
3676@@ -211,10 +211,12 @@ union offset_union {
3677 #define __get16_unaligned_check(ins,val,addr) \
3678 do { \
3679 unsigned int err = 0, v, a = addr; \
3680+ pax_open_userland(); \
3681 __get8_unaligned_check(ins,v,a,err); \
3682 val = v << ((BE) ? 8 : 0); \
3683 __get8_unaligned_check(ins,v,a,err); \
3684 val |= v << ((BE) ? 0 : 8); \
3685+ pax_close_userland(); \
3686 if (err) \
3687 goto fault; \
3688 } while (0)
3689@@ -228,6 +230,7 @@ union offset_union {
3690 #define __get32_unaligned_check(ins,val,addr) \
3691 do { \
3692 unsigned int err = 0, v, a = addr; \
3693+ pax_open_userland(); \
3694 __get8_unaligned_check(ins,v,a,err); \
3695 val = v << ((BE) ? 24 : 0); \
3696 __get8_unaligned_check(ins,v,a,err); \
3697@@ -236,6 +239,7 @@ union offset_union {
3698 val |= v << ((BE) ? 8 : 16); \
3699 __get8_unaligned_check(ins,v,a,err); \
3700 val |= v << ((BE) ? 0 : 24); \
3701+ pax_close_userland(); \
3702 if (err) \
3703 goto fault; \
3704 } while (0)
3705@@ -249,6 +253,7 @@ union offset_union {
3706 #define __put16_unaligned_check(ins,val,addr) \
3707 do { \
3708 unsigned int err = 0, v = val, a = addr; \
3709+ pax_open_userland(); \
3710 __asm__( FIRST_BYTE_16 \
3711 ARM( "1: "ins" %1, [%2], #1\n" ) \
3712 THUMB( "1: "ins" %1, [%2]\n" ) \
3713@@ -268,6 +273,7 @@ union offset_union {
3714 " .popsection\n" \
3715 : "=r" (err), "=&r" (v), "=&r" (a) \
3716 : "0" (err), "1" (v), "2" (a)); \
3717+ pax_close_userland(); \
3718 if (err) \
3719 goto fault; \
3720 } while (0)
3721@@ -281,6 +287,7 @@ union offset_union {
3722 #define __put32_unaligned_check(ins,val,addr) \
3723 do { \
3724 unsigned int err = 0, v = val, a = addr; \
3725+ pax_open_userland(); \
3726 __asm__( FIRST_BYTE_32 \
3727 ARM( "1: "ins" %1, [%2], #1\n" ) \
3728 THUMB( "1: "ins" %1, [%2]\n" ) \
3729@@ -310,6 +317,7 @@ union offset_union {
3730 " .popsection\n" \
3731 : "=r" (err), "=&r" (v), "=&r" (a) \
3732 : "0" (err), "1" (v), "2" (a)); \
3733+ pax_close_userland(); \
3734 if (err) \
3735 goto fault; \
3736 } while (0)
3737diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
e2b79cd1 3738index 5dbf13f..a2d1876 100644
bb5f0bf8
AF
3739--- a/arch/arm/mm/fault.c
3740+++ b/arch/arm/mm/fault.c
3741@@ -25,6 +25,7 @@
3742 #include <asm/system_misc.h>
3743 #include <asm/system_info.h>
3744 #include <asm/tlbflush.h>
3745+#include <asm/sections.h>
3746
3747 #include "fault.h"
3748
3749@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3750 if (fixup_exception(regs))
3751 return;
3752
3753+#ifdef CONFIG_PAX_KERNEXEC
3754+ if ((fsr & FSR_WRITE) &&
3755+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3756+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3757+ {
3758+ if (current->signal->curr_ip)
3759+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3760+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3761+ else
3762+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3763+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3764+ }
3765+#endif
3766+
3767 /*
3768 * No handler, we'll have to terminate things with extreme prejudice.
3769 */
3770@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3771 }
3772 #endif
3773
3774+#ifdef CONFIG_PAX_PAGEEXEC
3775+ if (fsr & FSR_LNX_PF) {
3776+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3777+ do_group_exit(SIGKILL);
3778+ }
3779+#endif
3780+
3781 tsk->thread.address = addr;
3782 tsk->thread.error_code = fsr;
3783 tsk->thread.trap_no = 14;
3784@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3785 }
3786 #endif /* CONFIG_MMU */
3787
3788+#ifdef CONFIG_PAX_PAGEEXEC
3789+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3790+{
3791+ long i;
3792+
3793+ printk(KERN_ERR "PAX: bytes at PC: ");
3794+ for (i = 0; i < 20; i++) {
3795+ unsigned char c;
3796+ if (get_user(c, (__force unsigned char __user *)pc+i))
3797+ printk(KERN_CONT "?? ");
3798+ else
3799+ printk(KERN_CONT "%02x ", c);
3800+ }
3801+ printk("\n");
3802+
3803+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3804+ for (i = -1; i < 20; i++) {
3805+ unsigned long c;
3806+ if (get_user(c, (__force unsigned long __user *)sp+i))
3807+ printk(KERN_CONT "???????? ");
3808+ else
3809+ printk(KERN_CONT "%08lx ", c);
3810+ }
3811+ printk("\n");
3812+}
3813+#endif
3814+
3815 /*
3816 * First Level Translation Fault Handler
3817 *
3818@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3819 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3820 struct siginfo info;
3821
3822+#ifdef CONFIG_PAX_MEMORY_UDEREF
3823+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3824+ if (current->signal->curr_ip)
3825+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3826+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3827+ else
3828+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3829+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3830+ goto die;
3831+ }
3832+#endif
3833+
3834 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3835 return;
3836
3837+die:
3838 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3839 inf->name, fsr, addr);
3840
e2b79cd1 3841@@ -569,15 +631,68 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
bb5f0bf8
AF
3842 ifsr_info[nr].name = name;
3843 }
3844
3845+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3846+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3847+
3848 asmlinkage void __exception
3849 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3850 {
3851 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3852 struct siginfo info;
e2b79cd1
AF
3853+ unsigned long pc = instruction_pointer(regs);
3854+
bb5f0bf8
AF
3855+ if (user_mode(regs)) {
3856+ unsigned long sigpage = current->mm->context.sigpage;
3857+
e2b79cd1
AF
3858+ if (sigpage <= pc && pc < sigpage + 7*4) {
3859+ if (pc < sigpage + 3*4)
bb5f0bf8
AF
3860+ sys_sigreturn(regs);
3861+ else
3862+ sys_rt_sigreturn(regs);
3863+ return;
3864+ }
e2b79cd1 3865+ if (pc == 0xffff0fe0UL) {
bb5f0bf8
AF
3866+ /*
3867+ * PaX: __kuser_get_tls emulation
3868+ */
3869+ regs->ARM_r0 = current_thread_info()->tp_value;
3870+ regs->ARM_pc = regs->ARM_lr;
3871+ return;
3872+ }
3873+ }
3874+
3875+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3876+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3877+ if (current->signal->curr_ip)
3878+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3879+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
e2b79cd1 3880+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
bb5f0bf8
AF
3881+ else
3882+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3883+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
e2b79cd1 3884+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
bb5f0bf8
AF
3885+ goto die;
3886+ }
3887+#endif
3888+
3889+#ifdef CONFIG_PAX_REFCOUNT
3890+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3891+ unsigned int bkpt;
3892+
e2b79cd1 3893+ if (!probe_kernel_address((unsigned int *)pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
bb5f0bf8
AF
3894+ current->thread.error_code = ifsr;
3895+ current->thread.trap_no = 0;
3896+ pax_report_refcount_overflow(regs);
3897+ fixup_exception(regs);
3898+ return;
3899+ }
3900+ }
3901+#endif
e2b79cd1 3902
bb5f0bf8
AF
3903 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3904 return;
3905
3906+die:
3907 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3908 inf->name, ifsr, addr);
3909
3910diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3911index cf08bdf..772656c 100644
3912--- a/arch/arm/mm/fault.h
3913+++ b/arch/arm/mm/fault.h
3914@@ -3,6 +3,7 @@
3915
3916 /*
3917 * Fault status register encodings. We steal bit 31 for our own purposes.
3918+ * Set when the FSR value is from an instruction fault.
3919 */
3920 #define FSR_LNX_PF (1 << 31)
3921 #define FSR_WRITE (1 << 11)
3922@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3923 }
3924 #endif
3925
3926+/* valid for LPAE and !LPAE */
3927+static inline int is_xn_fault(unsigned int fsr)
3928+{
3929+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3930+}
3931+
3932+static inline int is_domain_fault(unsigned int fsr)
3933+{
3934+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3935+}
3936+
3937 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3938 unsigned long search_exception_table(unsigned long addr);
3939
3940diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3941index 0ecc43f..190b956 100644
3942--- a/arch/arm/mm/init.c
3943+++ b/arch/arm/mm/init.c
3944@@ -30,6 +30,8 @@
3945 #include <asm/setup.h>
3946 #include <asm/tlb.h>
3947 #include <asm/fixmap.h>
3948+#include <asm/system_info.h>
3949+#include <asm/cp15.h>
3950
3951 #include <asm/mach/arch.h>
3952 #include <asm/mach/map.h>
3953@@ -726,7 +728,46 @@ void free_initmem(void)
3954 {
3955 #ifdef CONFIG_HAVE_TCM
3956 extern char __tcm_start, __tcm_end;
3957+#endif
3958
3959+#ifdef CONFIG_PAX_KERNEXEC
3960+ unsigned long addr;
3961+ pgd_t *pgd;
3962+ pud_t *pud;
3963+ pmd_t *pmd;
3964+ int cpu_arch = cpu_architecture();
3965+ unsigned int cr = get_cr();
3966+
3967+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3968+ /* make pages tables, etc before .text NX */
3969+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3970+ pgd = pgd_offset_k(addr);
3971+ pud = pud_offset(pgd, addr);
3972+ pmd = pmd_offset(pud, addr);
3973+ __section_update(pmd, addr, PMD_SECT_XN);
3974+ }
3975+ /* make init NX */
3976+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3977+ pgd = pgd_offset_k(addr);
3978+ pud = pud_offset(pgd, addr);
3979+ pmd = pmd_offset(pud, addr);
3980+ __section_update(pmd, addr, PMD_SECT_XN);
3981+ }
3982+ /* make kernel code/rodata RX */
3983+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3984+ pgd = pgd_offset_k(addr);
3985+ pud = pud_offset(pgd, addr);
3986+ pmd = pmd_offset(pud, addr);
3987+#ifdef CONFIG_ARM_LPAE
3988+ __section_update(pmd, addr, PMD_SECT_RDONLY);
3989+#else
3990+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3991+#endif
3992+ }
3993+ }
3994+#endif
3995+
3996+#ifdef CONFIG_HAVE_TCM
3997 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3998 free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
3999 #endif
4000diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4001index 04d9006..c547d85 100644
4002--- a/arch/arm/mm/ioremap.c
4003+++ b/arch/arm/mm/ioremap.c
4004@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4005 unsigned int mtype;
4006
4007 if (cached)
4008- mtype = MT_MEMORY;
4009+ mtype = MT_MEMORY_RX;
4010 else
4011- mtype = MT_MEMORY_NONCACHED;
4012+ mtype = MT_MEMORY_NONCACHED_RX;
4013
4014 return __arm_ioremap_caller(phys_addr, size, mtype,
4015 __builtin_return_address(0));
4016diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4017index 10062ce..8695745 100644
4018--- a/arch/arm/mm/mmap.c
4019+++ b/arch/arm/mm/mmap.c
4020@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4021 struct vm_area_struct *vma;
4022 int do_align = 0;
4023 int aliasing = cache_is_vipt_aliasing();
4024+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4025 struct vm_unmapped_area_info info;
4026
4027 /*
4028@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4029 if (len > TASK_SIZE)
4030 return -ENOMEM;
4031
4032+#ifdef CONFIG_PAX_RANDMMAP
4033+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4034+#endif
4035+
4036 if (addr) {
4037 if (do_align)
4038 addr = COLOUR_ALIGN(addr, pgoff);
4039@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4040 addr = PAGE_ALIGN(addr);
4041
4042 vma = find_vma(mm, addr);
4043- if (TASK_SIZE - len >= addr &&
4044- (!vma || addr + len <= vma->vm_start))
4045+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4046 return addr;
4047 }
4048
4049@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4050 info.high_limit = TASK_SIZE;
4051 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4052 info.align_offset = pgoff << PAGE_SHIFT;
4053+ info.threadstack_offset = offset;
4054 return vm_unmapped_area(&info);
4055 }
4056
4057@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4058 unsigned long addr = addr0;
4059 int do_align = 0;
4060 int aliasing = cache_is_vipt_aliasing();
4061+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4062 struct vm_unmapped_area_info info;
4063
4064 /*
4065@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4066 return addr;
4067 }
4068
4069+#ifdef CONFIG_PAX_RANDMMAP
4070+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4071+#endif
4072+
4073 /* requesting a specific address */
4074 if (addr) {
4075 if (do_align)
4076@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4077 else
4078 addr = PAGE_ALIGN(addr);
4079 vma = find_vma(mm, addr);
4080- if (TASK_SIZE - len >= addr &&
4081- (!vma || addr + len <= vma->vm_start))
4082+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4083 return addr;
4084 }
4085
4086@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4087 info.high_limit = mm->mmap_base;
4088 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4089 info.align_offset = pgoff << PAGE_SHIFT;
4090+ info.threadstack_offset = offset;
4091 addr = vm_unmapped_area(&info);
4092
4093 /*
4094@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4095 {
4096 unsigned long random_factor = 0UL;
4097
4098+#ifdef CONFIG_PAX_RANDMMAP
4099+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4100+#endif
4101+
4102 /* 8 bits of randomness in 20 address space bits */
4103 if ((current->flags & PF_RANDOMIZE) &&
4104 !(current->personality & ADDR_NO_RANDOMIZE))
4105@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4106
4107 if (mmap_is_legacy()) {
4108 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4109+
4110+#ifdef CONFIG_PAX_RANDMMAP
4111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4112+ mm->mmap_base += mm->delta_mmap;
4113+#endif
4114+
4115 mm->get_unmapped_area = arch_get_unmapped_area;
4116 mm->unmap_area = arch_unmap_area;
4117 } else {
4118 mm->mmap_base = mmap_base(random_factor);
4119+
4120+#ifdef CONFIG_PAX_RANDMMAP
4121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4122+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4123+#endif
4124+
4125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4126 mm->unmap_area = arch_unmap_area_topdown;
4127 }
4128diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4129index daf336f..4e6392c 100644
4130--- a/arch/arm/mm/mmu.c
4131+++ b/arch/arm/mm/mmu.c
4132@@ -36,6 +36,22 @@
4133 #include "mm.h"
4134 #include "tcm.h"
4135
4136+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4137+void modify_domain(unsigned int dom, unsigned int type)
4138+{
4139+ struct thread_info *thread = current_thread_info();
4140+ unsigned int domain = thread->cpu_domain;
4141+ /*
4142+ * DOMAIN_MANAGER might be defined to some other value,
4143+ * use the arch-defined constant
4144+ */
4145+ domain &= ~domain_val(dom, 3);
4146+ thread->cpu_domain = domain | domain_val(dom, type);
4147+ set_domain(thread->cpu_domain);
4148+}
4149+EXPORT_SYMBOL(modify_domain);
4150+#endif
4151+
4152 /*
4153 * empty_zero_page is a special page that is used for
4154 * zero-initialized data and COW.
4155@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
4156
4157 #endif /* ifdef CONFIG_CPU_CP15 / else */
4158
4159-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4160+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4161 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4162
4163-static struct mem_type mem_types[] = {
4164+#ifdef CONFIG_PAX_KERNEXEC
4165+#define L_PTE_KERNEXEC L_PTE_RDONLY
4166+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4167+#else
4168+#define L_PTE_KERNEXEC L_PTE_DIRTY
4169+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4170+#endif
4171+
4172+static struct mem_type mem_types[] __read_only = {
4173 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4174 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4175 L_PTE_SHARED,
4176@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
4177 [MT_UNCACHED] = {
4178 .prot_pte = PROT_PTE_DEVICE,
4179 .prot_l1 = PMD_TYPE_TABLE,
4180- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4181+ .prot_sect = PROT_SECT_DEVICE,
4182 .domain = DOMAIN_IO,
4183 },
4184 [MT_CACHECLEAN] = {
4185- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4186+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4187 .domain = DOMAIN_KERNEL,
4188 },
4189 #ifndef CONFIG_ARM_LPAE
4190 [MT_MINICLEAN] = {
4191- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4192+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4193 .domain = DOMAIN_KERNEL,
4194 },
4195 #endif
4196@@ -277,36 +301,54 @@ static struct mem_type mem_types[] = {
4197 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4198 L_PTE_RDONLY,
4199 .prot_l1 = PMD_TYPE_TABLE,
4200- .domain = DOMAIN_USER,
4201+ .domain = DOMAIN_VECTORS,
4202 },
4203 [MT_HIGH_VECTORS] = {
4204 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4205 L_PTE_USER | L_PTE_RDONLY,
4206 .prot_l1 = PMD_TYPE_TABLE,
4207- .domain = DOMAIN_USER,
4208+ .domain = DOMAIN_VECTORS,
4209 },
4210- [MT_MEMORY] = {
4211+ [MT_MEMORY_RWX] = {
4212 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4213 .prot_l1 = PMD_TYPE_TABLE,
4214 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4215 .domain = DOMAIN_KERNEL,
4216 },
4217+ [MT_MEMORY_RW] = {
4218+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4219+ .prot_l1 = PMD_TYPE_TABLE,
4220+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4221+ .domain = DOMAIN_KERNEL,
4222+ },
4223+ [MT_MEMORY_RX] = {
4224+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4225+ .prot_l1 = PMD_TYPE_TABLE,
4226+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4227+ .domain = DOMAIN_KERNEL,
4228+ },
4229 [MT_ROM] = {
4230- .prot_sect = PMD_TYPE_SECT,
4231+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4232 .domain = DOMAIN_KERNEL,
4233 },
4234- [MT_MEMORY_NONCACHED] = {
4235+ [MT_MEMORY_NONCACHED_RW] = {
4236 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4237 L_PTE_MT_BUFFERABLE,
4238 .prot_l1 = PMD_TYPE_TABLE,
4239 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4240 .domain = DOMAIN_KERNEL,
4241 },
4242+ [MT_MEMORY_NONCACHED_RX] = {
4243+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4244+ L_PTE_MT_BUFFERABLE,
4245+ .prot_l1 = PMD_TYPE_TABLE,
4246+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4247+ .domain = DOMAIN_KERNEL,
4248+ },
4249 [MT_MEMORY_DTCM] = {
4250- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4251- L_PTE_XN,
4252+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4253 .prot_l1 = PMD_TYPE_TABLE,
4254- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4255+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4256 .domain = DOMAIN_KERNEL,
4257 },
4258 [MT_MEMORY_ITCM] = {
4259@@ -316,10 +358,10 @@ static struct mem_type mem_types[] = {
4260 },
4261 [MT_MEMORY_SO] = {
4262 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4263- L_PTE_MT_UNCACHED | L_PTE_XN,
4264+ L_PTE_MT_UNCACHED,
4265 .prot_l1 = PMD_TYPE_TABLE,
4266 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4267- PMD_SECT_UNCACHED | PMD_SECT_XN,
4268+ PMD_SECT_UNCACHED,
4269 .domain = DOMAIN_KERNEL,
4270 },
4271 [MT_MEMORY_DMA_READY] = {
4272@@ -405,9 +447,35 @@ static void __init build_mem_type_table(void)
4273 * to prevent speculative instruction fetches.
4274 */
4275 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4276+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4277 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4278+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4279 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4280+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4281 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4282+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4283+
4284+ /* Mark other regions on ARMv6+ as execute-never */
4285+
4286+#ifdef CONFIG_PAX_KERNEXEC
4287+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4288+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4289+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4290+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4291+#ifndef CONFIG_ARM_LPAE
4292+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4293+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4294+#endif
4295+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4296+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4297+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4298+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4299+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4300+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4301+#endif
4302+
4303+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4304+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4305 }
4306 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4307 /*
4308@@ -468,6 +536,9 @@ static void __init build_mem_type_table(void)
4309 * from SVC mode and no access from userspace.
4310 */
4311 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4312+#ifdef CONFIG_PAX_KERNEXEC
4313+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4314+#endif
4315 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4316 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4317 #endif
4318@@ -485,11 +556,17 @@ static void __init build_mem_type_table(void)
4319 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4320 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4321 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4322- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4323- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4324+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4325+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4326+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4327+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4328+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4329+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4330 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4331- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4332- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4333+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4334+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4335+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4336+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4337 }
4338 }
4339
4340@@ -500,15 +577,20 @@ static void __init build_mem_type_table(void)
4341 if (cpu_arch >= CPU_ARCH_ARMv6) {
4342 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4343 /* Non-cacheable Normal is XCB = 001 */
4344- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4345+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4346+ PMD_SECT_BUFFERED;
4347+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4348 PMD_SECT_BUFFERED;
4349 } else {
4350 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4351- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4352+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4353+ PMD_SECT_TEX(1);
4354+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4355 PMD_SECT_TEX(1);
4356 }
4357 } else {
4358- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4359+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4360+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4361 }
4362
4363 #ifdef CONFIG_ARM_LPAE
4364@@ -524,6 +606,8 @@ static void __init build_mem_type_table(void)
4365 vecs_pgprot |= PTE_EXT_AF;
4366 #endif
4367
4368+ user_pgprot |= __supported_pte_mask;
4369+
4370 for (i = 0; i < 16; i++) {
4371 pteval_t v = pgprot_val(protection_map[i]);
4372 protection_map[i] = __pgprot(v | user_pgprot);
4373@@ -541,10 +625,15 @@ static void __init build_mem_type_table(void)
4374
4375 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4376 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4377- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4378- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4379+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4380+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4381+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4382+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4383+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4384+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4385 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4386- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4387+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4388+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4389 mem_types[MT_ROM].prot_sect |= cp->pmd;
4390
4391 switch (cp->pmd) {
4392@@ -1166,18 +1255,15 @@ void __init arm_mm_memblock_reserve(void)
4393 * called function. This means you can't use any function or debugging
4394 * method which may touch any device, otherwise the kernel _will_ crash.
4395 */
4396+
4397+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4398+
4399 static void __init devicemaps_init(struct machine_desc *mdesc)
4400 {
4401 struct map_desc map;
4402 unsigned long addr;
4403- void *vectors;
4404
4405- /*
4406- * Allocate the vector page early.
4407- */
4408- vectors = early_alloc(PAGE_SIZE * 2);
4409-
4410- early_trap_init(vectors);
4411+ early_trap_init(&vectors);
4412
4413 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4414 pmd_clear(pmd_off_k(addr));
4415@@ -1217,7 +1303,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4416 * location (0xffff0000). If we aren't using high-vectors, also
4417 * create a mapping at the low-vectors virtual address.
4418 */
4419- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4420+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4421 map.virtual = 0xffff0000;
4422 map.length = PAGE_SIZE;
4423 #ifdef CONFIG_KUSER_HELPERS
4424@@ -1287,8 +1373,39 @@ static void __init map_lowmem(void)
4425 map.pfn = __phys_to_pfn(start);
4426 map.virtual = __phys_to_virt(start);
4427 map.length = end - start;
4428- map.type = MT_MEMORY;
4429
4430+#ifdef CONFIG_PAX_KERNEXEC
4431+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4432+ struct map_desc kernel;
4433+ struct map_desc initmap;
4434+
4435+ /* when freeing initmem we will make this RW */
4436+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4437+ initmap.virtual = (unsigned long)__init_begin;
4438+ initmap.length = _sdata - __init_begin;
4439+ initmap.type = MT_MEMORY_RWX;
4440+ create_mapping(&initmap);
4441+
4442+ /* when freeing initmem we will make this RX */
4443+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4444+ kernel.virtual = (unsigned long)_stext;
4445+ kernel.length = __init_begin - _stext;
4446+ kernel.type = MT_MEMORY_RWX;
4447+ create_mapping(&kernel);
4448+
4449+ if (map.virtual < (unsigned long)_stext) {
4450+ map.length = (unsigned long)_stext - map.virtual;
4451+ map.type = MT_MEMORY_RWX;
4452+ create_mapping(&map);
4453+ }
4454+
4455+ map.pfn = __phys_to_pfn(__pa(_sdata));
4456+ map.virtual = (unsigned long)_sdata;
4457+ map.length = end - __pa(_sdata);
4458+ }
4459+#endif
4460+
4461+ map.type = MT_MEMORY_RW;
4462 create_mapping(&map);
4463 }
4464 }
4465diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4466index a5bc92d..0bb4730 100644
4467--- a/arch/arm/plat-omap/sram.c
4468+++ b/arch/arm/plat-omap/sram.c
4469@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4470 * Looks like we need to preserve some bootloader code at the
4471 * beginning of SRAM for jumping to flash for reboot to work...
4472 */
4473+ pax_open_kernel();
4474 memset_io(omap_sram_base + omap_sram_skip, 0,
4475 omap_sram_size - omap_sram_skip);
4476+ pax_close_kernel();
4477 }
4478diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4479index ce6d763..cfea917 100644
4480--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4481+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4482@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4483 int (*started)(unsigned ch);
4484 int (*flush)(unsigned ch);
4485 int (*stop)(unsigned ch);
4486-};
4487+} __no_const;
4488
4489 extern void *samsung_dmadev_get_ops(void);
4490 extern void *s3c_dma_get_ops(void);
4491diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4492index f4726dc..39ed646 100644
4493--- a/arch/arm64/kernel/debug-monitors.c
4494+++ b/arch/arm64/kernel/debug-monitors.c
4495@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4496 return NOTIFY_OK;
4497 }
4498
4499-static struct notifier_block __cpuinitdata os_lock_nb = {
4500+static struct notifier_block os_lock_nb = {
4501 .notifier_call = os_lock_notify,
4502 };
4503
4504diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4505index 5ab825c..96aaec8 100644
4506--- a/arch/arm64/kernel/hw_breakpoint.c
4507+++ b/arch/arm64/kernel/hw_breakpoint.c
4508@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4509 return NOTIFY_OK;
4510 }
4511
4512-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4513+static struct notifier_block hw_breakpoint_reset_nb = {
4514 .notifier_call = hw_breakpoint_reset_notify,
4515 };
4516
4517diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4518index c3a58a1..78fbf54 100644
4519--- a/arch/avr32/include/asm/cache.h
4520+++ b/arch/avr32/include/asm/cache.h
4521@@ -1,8 +1,10 @@
4522 #ifndef __ASM_AVR32_CACHE_H
4523 #define __ASM_AVR32_CACHE_H
4524
4525+#include <linux/const.h>
4526+
4527 #define L1_CACHE_SHIFT 5
4528-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4529+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4530
4531 /*
4532 * Memory returned by kmalloc() may be used for DMA, so we must make
4533diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4534index d232888..87c8df1 100644
4535--- a/arch/avr32/include/asm/elf.h
4536+++ b/arch/avr32/include/asm/elf.h
4537@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4538 the loader. We need to make sure that it is out of the way of the program
4539 that it will "exec", and that there is sufficient room for the brk. */
4540
4541-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4542+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4543
4544+#ifdef CONFIG_PAX_ASLR
4545+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4546+
4547+#define PAX_DELTA_MMAP_LEN 15
4548+#define PAX_DELTA_STACK_LEN 15
4549+#endif
4550
4551 /* This yields a mask that user programs can use to figure out what
4552 instruction set this CPU supports. This could be done in user space,
4553diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4554index 479330b..53717a8 100644
4555--- a/arch/avr32/include/asm/kmap_types.h
4556+++ b/arch/avr32/include/asm/kmap_types.h
4557@@ -2,9 +2,9 @@
4558 #define __ASM_AVR32_KMAP_TYPES_H
4559
4560 #ifdef CONFIG_DEBUG_HIGHMEM
4561-# define KM_TYPE_NR 29
4562+# define KM_TYPE_NR 30
4563 #else
4564-# define KM_TYPE_NR 14
4565+# define KM_TYPE_NR 15
4566 #endif
4567
4568 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4569diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4570index b2f2d2d..d1c85cb 100644
4571--- a/arch/avr32/mm/fault.c
4572+++ b/arch/avr32/mm/fault.c
4573@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4574
4575 int exception_trace = 1;
4576
4577+#ifdef CONFIG_PAX_PAGEEXEC
4578+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4579+{
4580+ unsigned long i;
4581+
4582+ printk(KERN_ERR "PAX: bytes at PC: ");
4583+ for (i = 0; i < 20; i++) {
4584+ unsigned char c;
4585+ if (get_user(c, (unsigned char *)pc+i))
4586+ printk(KERN_CONT "???????? ");
4587+ else
4588+ printk(KERN_CONT "%02x ", c);
4589+ }
4590+ printk("\n");
4591+}
4592+#endif
4593+
4594 /*
4595 * This routine handles page faults. It determines the address and the
4596 * problem, and then passes it off to one of the appropriate routines.
4597@@ -174,6 +191,16 @@ bad_area:
4598 up_read(&mm->mmap_sem);
4599
4600 if (user_mode(regs)) {
4601+
4602+#ifdef CONFIG_PAX_PAGEEXEC
4603+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4604+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4605+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4606+ do_group_exit(SIGKILL);
4607+ }
4608+ }
4609+#endif
4610+
4611 if (exception_trace && printk_ratelimit())
4612 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4613 "sp %08lx ecr %lu\n",
4614diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4615index 568885a..f8008df 100644
4616--- a/arch/blackfin/include/asm/cache.h
4617+++ b/arch/blackfin/include/asm/cache.h
4618@@ -7,6 +7,7 @@
4619 #ifndef __ARCH_BLACKFIN_CACHE_H
4620 #define __ARCH_BLACKFIN_CACHE_H
4621
4622+#include <linux/const.h>
4623 #include <linux/linkage.h> /* for asmlinkage */
4624
4625 /*
4626@@ -14,7 +15,7 @@
4627 * Blackfin loads 32 bytes for cache
4628 */
4629 #define L1_CACHE_SHIFT 5
4630-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4631+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4632 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4633
4634 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4635diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4636index aea2718..3639a60 100644
4637--- a/arch/cris/include/arch-v10/arch/cache.h
4638+++ b/arch/cris/include/arch-v10/arch/cache.h
4639@@ -1,8 +1,9 @@
4640 #ifndef _ASM_ARCH_CACHE_H
4641 #define _ASM_ARCH_CACHE_H
4642
4643+#include <linux/const.h>
4644 /* Etrax 100LX have 32-byte cache-lines. */
4645-#define L1_CACHE_BYTES 32
4646 #define L1_CACHE_SHIFT 5
4647+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4648
4649 #endif /* _ASM_ARCH_CACHE_H */
4650diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4651index 7caf25d..ee65ac5 100644
4652--- a/arch/cris/include/arch-v32/arch/cache.h
4653+++ b/arch/cris/include/arch-v32/arch/cache.h
4654@@ -1,11 +1,12 @@
4655 #ifndef _ASM_CRIS_ARCH_CACHE_H
4656 #define _ASM_CRIS_ARCH_CACHE_H
4657
4658+#include <linux/const.h>
4659 #include <arch/hwregs/dma.h>
4660
4661 /* A cache-line is 32 bytes. */
4662-#define L1_CACHE_BYTES 32
4663 #define L1_CACHE_SHIFT 5
4664+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4665
4666 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4667
4668diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4669index b86329d..6709906 100644
4670--- a/arch/frv/include/asm/atomic.h
4671+++ b/arch/frv/include/asm/atomic.h
4672@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4673 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4674 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4675
4676+#define atomic64_read_unchecked(v) atomic64_read(v)
4677+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4678+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4679+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4680+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4681+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4682+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4683+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4684+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4685+
4686 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4687 {
4688 int c, old;
4689diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4690index 2797163..c2a401d 100644
4691--- a/arch/frv/include/asm/cache.h
4692+++ b/arch/frv/include/asm/cache.h
4693@@ -12,10 +12,11 @@
4694 #ifndef __ASM_CACHE_H
4695 #define __ASM_CACHE_H
4696
4697+#include <linux/const.h>
4698
4699 /* bytes per L1 cache line */
4700 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4701-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4702+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4703
4704 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4705 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4706diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4707index 43901f2..0d8b865 100644
4708--- a/arch/frv/include/asm/kmap_types.h
4709+++ b/arch/frv/include/asm/kmap_types.h
4710@@ -2,6 +2,6 @@
4711 #ifndef _ASM_KMAP_TYPES_H
4712 #define _ASM_KMAP_TYPES_H
4713
4714-#define KM_TYPE_NR 17
4715+#define KM_TYPE_NR 18
4716
4717 #endif
4718diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4719index 836f147..4cf23f5 100644
4720--- a/arch/frv/mm/elf-fdpic.c
4721+++ b/arch/frv/mm/elf-fdpic.c
4722@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4723 {
4724 struct vm_area_struct *vma;
4725 struct vm_unmapped_area_info info;
4726+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4727
4728 if (len > TASK_SIZE)
4729 return -ENOMEM;
4730@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4731 if (addr) {
4732 addr = PAGE_ALIGN(addr);
4733 vma = find_vma(current->mm, addr);
4734- if (TASK_SIZE - len >= addr &&
4735- (!vma || addr + len <= vma->vm_start))
4736+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4737 goto success;
4738 }
4739
4740@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4741 info.high_limit = (current->mm->start_stack - 0x00200000);
4742 info.align_mask = 0;
4743 info.align_offset = 0;
4744+ info.threadstack_offset = offset;
4745 addr = vm_unmapped_area(&info);
4746 if (!(addr & ~PAGE_MASK))
4747 goto success;
4748diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4749index f4ca594..adc72fd6 100644
4750--- a/arch/hexagon/include/asm/cache.h
4751+++ b/arch/hexagon/include/asm/cache.h
4752@@ -21,9 +21,11 @@
4753 #ifndef __ASM_CACHE_H
4754 #define __ASM_CACHE_H
4755
4756+#include <linux/const.h>
4757+
4758 /* Bytes per L1 cache line */
4759-#define L1_CACHE_SHIFT (5)
4760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4761+#define L1_CACHE_SHIFT 5
4762+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4763
4764 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4765 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4766diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4767index 6e6fe18..a6ae668 100644
4768--- a/arch/ia64/include/asm/atomic.h
4769+++ b/arch/ia64/include/asm/atomic.h
4770@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4771 #define atomic64_inc(v) atomic64_add(1, (v))
4772 #define atomic64_dec(v) atomic64_sub(1, (v))
4773
4774+#define atomic64_read_unchecked(v) atomic64_read(v)
4775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4783+
4784 /* Atomic operations are already serializing */
4785 #define smp_mb__before_atomic_dec() barrier()
4786 #define smp_mb__after_atomic_dec() barrier()
4787diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4788index 988254a..e1ee885 100644
4789--- a/arch/ia64/include/asm/cache.h
4790+++ b/arch/ia64/include/asm/cache.h
4791@@ -1,6 +1,7 @@
4792 #ifndef _ASM_IA64_CACHE_H
4793 #define _ASM_IA64_CACHE_H
4794
4795+#include <linux/const.h>
4796
4797 /*
4798 * Copyright (C) 1998-2000 Hewlett-Packard Co
4799@@ -9,7 +10,7 @@
4800
4801 /* Bytes per L1 (data) cache line. */
4802 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4803-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4804+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4805
4806 #ifdef CONFIG_SMP
4807 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4808diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4809index 5a83c5c..4d7f553 100644
4810--- a/arch/ia64/include/asm/elf.h
4811+++ b/arch/ia64/include/asm/elf.h
4812@@ -42,6 +42,13 @@
4813 */
4814 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4815
4816+#ifdef CONFIG_PAX_ASLR
4817+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4818+
4819+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4820+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4821+#endif
4822+
4823 #define PT_IA_64_UNWIND 0x70000001
4824
4825 /* IA-64 relocations: */
4826diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4827index 96a8d92..617a1cf 100644
4828--- a/arch/ia64/include/asm/pgalloc.h
4829+++ b/arch/ia64/include/asm/pgalloc.h
4830@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4831 pgd_val(*pgd_entry) = __pa(pud);
4832 }
4833
4834+static inline void
4835+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4836+{
4837+ pgd_populate(mm, pgd_entry, pud);
4838+}
4839+
4840 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4841 {
4842 return quicklist_alloc(0, GFP_KERNEL, NULL);
4843@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4844 pud_val(*pud_entry) = __pa(pmd);
4845 }
4846
4847+static inline void
4848+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4849+{
4850+ pud_populate(mm, pud_entry, pmd);
4851+}
4852+
4853 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4854 {
4855 return quicklist_alloc(0, GFP_KERNEL, NULL);
4856diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4857index 815810c..d60bd4c 100644
4858--- a/arch/ia64/include/asm/pgtable.h
4859+++ b/arch/ia64/include/asm/pgtable.h
4860@@ -12,7 +12,7 @@
4861 * David Mosberger-Tang <davidm@hpl.hp.com>
4862 */
4863
4864-
4865+#include <linux/const.h>
4866 #include <asm/mman.h>
4867 #include <asm/page.h>
4868 #include <asm/processor.h>
4869@@ -142,6 +142,17 @@
4870 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4871 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4872 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4873+
4874+#ifdef CONFIG_PAX_PAGEEXEC
4875+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4876+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4877+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4878+#else
4879+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4880+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4881+# define PAGE_COPY_NOEXEC PAGE_COPY
4882+#endif
4883+
4884 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4885 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4886 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4887diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4888index 54ff557..70c88b7 100644
4889--- a/arch/ia64/include/asm/spinlock.h
4890+++ b/arch/ia64/include/asm/spinlock.h
4891@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4892 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4893
4894 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4895- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4896+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4897 }
4898
4899 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4900diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4901index 449c8c0..18965fb 100644
4902--- a/arch/ia64/include/asm/uaccess.h
4903+++ b/arch/ia64/include/asm/uaccess.h
4904@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4905 static inline unsigned long
4906 __copy_to_user (void __user *to, const void *from, unsigned long count)
4907 {
4908+ if (count > INT_MAX)
4909+ return count;
4910+
4911+ if (!__builtin_constant_p(count))
4912+ check_object_size(from, count, true);
4913+
4914 return __copy_user(to, (__force void __user *) from, count);
4915 }
4916
4917 static inline unsigned long
4918 __copy_from_user (void *to, const void __user *from, unsigned long count)
4919 {
4920+ if (count > INT_MAX)
4921+ return count;
4922+
4923+ if (!__builtin_constant_p(count))
4924+ check_object_size(to, count, false);
4925+
4926 return __copy_user((__force void __user *) to, from, count);
4927 }
4928
4929@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4930 ({ \
4931 void __user *__cu_to = (to); \
4932 const void *__cu_from = (from); \
4933- long __cu_len = (n); \
4934+ unsigned long __cu_len = (n); \
4935 \
4936- if (__access_ok(__cu_to, __cu_len, get_fs())) \
4937+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4938+ if (!__builtin_constant_p(n)) \
4939+ check_object_size(__cu_from, __cu_len, true); \
4940 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4941+ } \
4942 __cu_len; \
4943 })
4944
4945@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4946 ({ \
4947 void *__cu_to = (to); \
4948 const void __user *__cu_from = (from); \
4949- long __cu_len = (n); \
4950+ unsigned long __cu_len = (n); \
4951 \
4952 __chk_user_ptr(__cu_from); \
4953- if (__access_ok(__cu_from, __cu_len, get_fs())) \
4954+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4955+ if (!__builtin_constant_p(n)) \
4956+ check_object_size(__cu_to, __cu_len, false); \
4957 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4958+ } \
4959 __cu_len; \
4960 })
4961
4962diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4963index 2d67317..07d8bfa 100644
4964--- a/arch/ia64/kernel/err_inject.c
4965+++ b/arch/ia64/kernel/err_inject.c
4966@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4967 return NOTIFY_OK;
4968 }
4969
4970-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4971+static struct notifier_block err_inject_cpu_notifier =
4972 {
4973 .notifier_call = err_inject_cpu_callback,
4974 };
4975diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4976index d7396db..b33e873 100644
4977--- a/arch/ia64/kernel/mca.c
4978+++ b/arch/ia64/kernel/mca.c
4979@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4980 return NOTIFY_OK;
4981 }
4982
4983-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4984+static struct notifier_block mca_cpu_notifier = {
4985 .notifier_call = mca_cpu_callback
4986 };
4987
4988diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4989index 24603be..948052d 100644
4990--- a/arch/ia64/kernel/module.c
4991+++ b/arch/ia64/kernel/module.c
4992@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4993 void
4994 module_free (struct module *mod, void *module_region)
4995 {
4996- if (mod && mod->arch.init_unw_table &&
4997- module_region == mod->module_init) {
4998+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4999 unw_remove_unwind_table(mod->arch.init_unw_table);
5000 mod->arch.init_unw_table = NULL;
5001 }
5002@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5003 }
5004
5005 static inline int
5006+in_init_rx (const struct module *mod, uint64_t addr)
5007+{
5008+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5009+}
5010+
5011+static inline int
5012+in_init_rw (const struct module *mod, uint64_t addr)
5013+{
5014+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5015+}
5016+
5017+static inline int
5018 in_init (const struct module *mod, uint64_t addr)
5019 {
5020- return addr - (uint64_t) mod->module_init < mod->init_size;
5021+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5022+}
5023+
5024+static inline int
5025+in_core_rx (const struct module *mod, uint64_t addr)
5026+{
5027+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5028+}
5029+
5030+static inline int
5031+in_core_rw (const struct module *mod, uint64_t addr)
5032+{
5033+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5034 }
5035
5036 static inline int
5037 in_core (const struct module *mod, uint64_t addr)
5038 {
5039- return addr - (uint64_t) mod->module_core < mod->core_size;
5040+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5041 }
5042
5043 static inline int
5044@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5045 break;
5046
5047 case RV_BDREL:
5048- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5049+ if (in_init_rx(mod, val))
5050+ val -= (uint64_t) mod->module_init_rx;
5051+ else if (in_init_rw(mod, val))
5052+ val -= (uint64_t) mod->module_init_rw;
5053+ else if (in_core_rx(mod, val))
5054+ val -= (uint64_t) mod->module_core_rx;
5055+ else if (in_core_rw(mod, val))
5056+ val -= (uint64_t) mod->module_core_rw;
5057 break;
5058
5059 case RV_LTV:
5060@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5061 * addresses have been selected...
5062 */
5063 uint64_t gp;
5064- if (mod->core_size > MAX_LTOFF)
5065+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5066 /*
5067 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5068 * at the end of the module.
5069 */
5070- gp = mod->core_size - MAX_LTOFF / 2;
5071+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5072 else
5073- gp = mod->core_size / 2;
5074- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5075+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5076+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5077 mod->arch.gp = gp;
5078 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5079 }
5080diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5081index 2b3c2d7..a318d84 100644
5082--- a/arch/ia64/kernel/palinfo.c
5083+++ b/arch/ia64/kernel/palinfo.c
5084@@ -980,7 +980,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5085 return NOTIFY_OK;
5086 }
5087
5088-static struct notifier_block __refdata palinfo_cpu_notifier =
5089+static struct notifier_block palinfo_cpu_notifier =
5090 {
5091 .notifier_call = palinfo_cpu_callback,
5092 .priority = 0,
5093diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5094index 4bc580a..7767f24 100644
5095--- a/arch/ia64/kernel/salinfo.c
5096+++ b/arch/ia64/kernel/salinfo.c
5097@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5098 return NOTIFY_OK;
5099 }
5100
5101-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5102+static struct notifier_block salinfo_cpu_notifier =
5103 {
5104 .notifier_call = salinfo_cpu_callback,
5105 .priority = 0,
5106diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5107index 41e33f8..65180b2 100644
5108--- a/arch/ia64/kernel/sys_ia64.c
5109+++ b/arch/ia64/kernel/sys_ia64.c
5110@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5111 unsigned long align_mask = 0;
5112 struct mm_struct *mm = current->mm;
5113 struct vm_unmapped_area_info info;
5114+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5115
5116 if (len > RGN_MAP_LIMIT)
5117 return -ENOMEM;
5118@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5119 if (REGION_NUMBER(addr) == RGN_HPAGE)
5120 addr = 0;
5121 #endif
5122+
5123+#ifdef CONFIG_PAX_RANDMMAP
5124+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5125+ addr = mm->free_area_cache;
5126+ else
5127+#endif
5128+
5129 if (!addr)
5130 addr = TASK_UNMAPPED_BASE;
5131
5132@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5133 info.high_limit = TASK_SIZE;
5134 info.align_mask = align_mask;
5135 info.align_offset = 0;
5136+ info.threadstack_offset = offset;
5137 return vm_unmapped_area(&info);
5138 }
5139
5140diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5141index dc00b2c..cce53c2 100644
5142--- a/arch/ia64/kernel/topology.c
5143+++ b/arch/ia64/kernel/topology.c
5144@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5145 return NOTIFY_OK;
5146 }
5147
5148-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5149+static struct notifier_block cache_cpu_notifier =
5150 {
5151 .notifier_call = cache_cpu_callback
5152 };
5153diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5154index 0ccb28f..8992469 100644
5155--- a/arch/ia64/kernel/vmlinux.lds.S
5156+++ b/arch/ia64/kernel/vmlinux.lds.S
5157@@ -198,7 +198,7 @@ SECTIONS {
5158 /* Per-cpu data: */
5159 . = ALIGN(PERCPU_PAGE_SIZE);
5160 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5161- __phys_per_cpu_start = __per_cpu_load;
5162+ __phys_per_cpu_start = per_cpu_load;
5163 /*
5164 * ensure percpu data fits
5165 * into percpu page size
5166diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5167index 6cf0341..d352594 100644
5168--- a/arch/ia64/mm/fault.c
5169+++ b/arch/ia64/mm/fault.c
5170@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5171 return pte_present(pte);
5172 }
5173
5174+#ifdef CONFIG_PAX_PAGEEXEC
5175+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5176+{
5177+ unsigned long i;
5178+
5179+ printk(KERN_ERR "PAX: bytes at PC: ");
5180+ for (i = 0; i < 8; i++) {
5181+ unsigned int c;
5182+ if (get_user(c, (unsigned int *)pc+i))
5183+ printk(KERN_CONT "???????? ");
5184+ else
5185+ printk(KERN_CONT "%08x ", c);
5186+ }
5187+ printk("\n");
5188+}
5189+#endif
5190+
5191 # define VM_READ_BIT 0
5192 # define VM_WRITE_BIT 1
5193 # define VM_EXEC_BIT 2
5194@@ -149,8 +166,21 @@ retry:
5195 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5196 goto bad_area;
5197
5198- if ((vma->vm_flags & mask) != mask)
5199+ if ((vma->vm_flags & mask) != mask) {
5200+
5201+#ifdef CONFIG_PAX_PAGEEXEC
5202+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5203+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5204+ goto bad_area;
5205+
5206+ up_read(&mm->mmap_sem);
5207+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5208+ do_group_exit(SIGKILL);
5209+ }
5210+#endif
5211+
5212 goto bad_area;
5213+ }
5214
5215 /*
5216 * If for any reason at all we couldn't handle the fault, make
5217diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5218index 76069c1..c2aa816 100644
5219--- a/arch/ia64/mm/hugetlbpage.c
5220+++ b/arch/ia64/mm/hugetlbpage.c
5221@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5222 unsigned long pgoff, unsigned long flags)
5223 {
5224 struct vm_unmapped_area_info info;
5225+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5226
5227 if (len > RGN_MAP_LIMIT)
5228 return -ENOMEM;
5229@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5230 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5231 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5232 info.align_offset = 0;
5233+ info.threadstack_offset = offset;
5234 return vm_unmapped_area(&info);
5235 }
5236
5237diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5238index d1fe4b4..2628f37 100644
5239--- a/arch/ia64/mm/init.c
5240+++ b/arch/ia64/mm/init.c
5241@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5242 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5243 vma->vm_end = vma->vm_start + PAGE_SIZE;
5244 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5245+
5246+#ifdef CONFIG_PAX_PAGEEXEC
5247+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5248+ vma->vm_flags &= ~VM_EXEC;
5249+
5250+#ifdef CONFIG_PAX_MPROTECT
5251+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5252+ vma->vm_flags &= ~VM_MAYEXEC;
5253+#endif
5254+
5255+ }
5256+#endif
5257+
5258 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5259 down_write(&current->mm->mmap_sem);
5260 if (insert_vm_struct(current->mm, vma)) {
5261diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5262index 40b3ee9..8c2c112 100644
5263--- a/arch/m32r/include/asm/cache.h
5264+++ b/arch/m32r/include/asm/cache.h
5265@@ -1,8 +1,10 @@
5266 #ifndef _ASM_M32R_CACHE_H
5267 #define _ASM_M32R_CACHE_H
5268
5269+#include <linux/const.h>
5270+
5271 /* L1 cache line size */
5272 #define L1_CACHE_SHIFT 4
5273-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5274+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5275
5276 #endif /* _ASM_M32R_CACHE_H */
5277diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5278index 82abd15..d95ae5d 100644
5279--- a/arch/m32r/lib/usercopy.c
5280+++ b/arch/m32r/lib/usercopy.c
5281@@ -14,6 +14,9 @@
5282 unsigned long
5283 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5284 {
5285+ if ((long)n < 0)
5286+ return n;
5287+
5288 prefetch(from);
5289 if (access_ok(VERIFY_WRITE, to, n))
5290 __copy_user(to,from,n);
5291@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5292 unsigned long
5293 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5294 {
5295+ if ((long)n < 0)
5296+ return n;
5297+
5298 prefetchw(to);
5299 if (access_ok(VERIFY_READ, from, n))
5300 __copy_user_zeroing(to,from,n);
5301diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5302index 0395c51..5f26031 100644
5303--- a/arch/m68k/include/asm/cache.h
5304+++ b/arch/m68k/include/asm/cache.h
5305@@ -4,9 +4,11 @@
5306 #ifndef __ARCH_M68K_CACHE_H
5307 #define __ARCH_M68K_CACHE_H
5308
5309+#include <linux/const.h>
5310+
5311 /* bytes per L1 cache line */
5312 #define L1_CACHE_SHIFT 4
5313-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5314+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5315
5316 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5317
5318diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5319index 3c52fa6..11b2ad8 100644
5320--- a/arch/metag/mm/hugetlbpage.c
5321+++ b/arch/metag/mm/hugetlbpage.c
5322@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5323 info.high_limit = TASK_SIZE;
5324 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5325 info.align_offset = 0;
5326+ info.threadstack_offset = 0;
5327 return vm_unmapped_area(&info);
5328 }
5329
5330diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5331index 4efe96a..60e8699 100644
5332--- a/arch/microblaze/include/asm/cache.h
5333+++ b/arch/microblaze/include/asm/cache.h
5334@@ -13,11 +13,12 @@
5335 #ifndef _ASM_MICROBLAZE_CACHE_H
5336 #define _ASM_MICROBLAZE_CACHE_H
5337
5338+#include <linux/const.h>
5339 #include <asm/registers.h>
5340
5341 #define L1_CACHE_SHIFT 5
5342 /* word-granular cache in microblaze */
5343-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5344+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5345
5346 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5347
5348diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
e2b79cd1 5349index 08b6079..e94e6da 100644
bb5f0bf8
AF
5350--- a/arch/mips/include/asm/atomic.h
5351+++ b/arch/mips/include/asm/atomic.h
e2b79cd1 5352@@ -21,15 +21,39 @@
bb5f0bf8
AF
5353 #include <asm/cmpxchg.h>
5354 #include <asm/war.h>
5355
5356+#ifdef CONFIG_GENERIC_ATOMIC64
5357+#include <asm-generic/atomic64.h>
5358+#endif
5359+
5360 #define ATOMIC_INIT(i) { (i) }
5361
e2b79cd1
AF
5362+#ifdef CONFIG_64BIT
5363+#define _ASM_EXTABLE(from, to) \
5364+" .section __ex_table,\"a\"\n" \
5365+" .dword " #from ", " #to"\n" \
5366+" .previous\n"
5367+#else
5368+#define _ASM_EXTABLE(from, to) \
5369+" .section __ex_table,\"a\"\n" \
5370+" .word " #from ", " #to"\n" \
5371+" .previous\n"
5372+#endif
5373+
bb5f0bf8 5374 /*
e2b79cd1
AF
5375 * atomic_read - read atomic variable
5376 * @v: pointer of type atomic_t
5377 *
5378 * Atomically reads the value of @v.
bb5f0bf8 5379 */
e2b79cd1
AF
5380-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5381+static inline int atomic_read(const atomic_t *v)
5382+{
5383+ return (*(volatile const int *) &v->counter);
5384+}
5385+
5386+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5387+{
5388+ return (*(volatile const int *) &v->counter);
5389+}
bb5f0bf8 5390
e2b79cd1
AF
5391 /*
5392 * atomic_set - set atomic variable
5393@@ -38,7 +62,15 @@
5394 *
5395 * Atomically sets the value of @v to @i.
5396 */
5397-#define atomic_set(v, i) ((v)->counter = (i))
5398+static inline void atomic_set(atomic_t *v, int i)
5399+{
5400+ v->counter = i;
5401+}
bb5f0bf8 5402+
e2b79cd1
AF
5403+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5404+{
5405+ v->counter = i;
5406+}
5407
5408 /*
5409 * atomic_add - add integer to atomic variable
5410@@ -47,7 +79,67 @@
5411 *
5412 * Atomically adds @i to @v.
5413 */
5414-static __inline__ void atomic_add(int i, atomic_t * v)
5415+static __inline__ void atomic_add(int i, atomic_t *v)
5416+{
5417+ int temp;
5418+
5419+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5420+ __asm__ __volatile__(
5421+ " .set mips3 \n"
5422+ "1: ll %0, %1 # atomic_add \n"
5423+#ifdef CONFIG_PAX_REFCOUNT
5424+ /* Exception on overflow. */
5425+ "2: add %0, %2 \n"
5426+#else
5427+ " addu %0, %2 \n"
5428+#endif
5429+ " sc %0, %1 \n"
5430+ " beqzl %0, 1b \n"
5431+#ifdef CONFIG_PAX_REFCOUNT
5432+ "3: \n"
5433+ _ASM_EXTABLE(2b, 3b)
5434+#endif
5435+ " .set mips0 \n"
5436+ : "=&r" (temp), "+m" (v->counter)
5437+ : "Ir" (i));
5438+ } else if (kernel_uses_llsc) {
5439+ __asm__ __volatile__(
5440+ " .set mips3 \n"
5441+ "1: ll %0, %1 # atomic_add \n"
5442+#ifdef CONFIG_PAX_REFCOUNT
5443+ /* Exception on overflow. */
5444+ "2: add %0, %2 \n"
5445+#else
5446+ " addu %0, %2 \n"
5447+#endif
5448+ " sc %0, %1 \n"
5449+ " beqz %0, 1b \n"
5450+#ifdef CONFIG_PAX_REFCOUNT
5451+ "3: \n"
5452+ _ASM_EXTABLE(2b, 3b)
5453+#endif
5454+ " .set mips0 \n"
5455+ : "=&r" (temp), "+m" (v->counter)
5456+ : "Ir" (i));
5457+ } else {
5458+ unsigned long flags;
5459+
5460+ raw_local_irq_save(flags);
5461+ __asm__ __volatile__(
5462+#ifdef CONFIG_PAX_REFCOUNT
5463+ /* Exception on overflow. */
5464+ "1: add %0, %1 \n"
5465+ "2: \n"
5466+ _ASM_EXTABLE(1b, 2b)
5467+#else
5468+ " addu %0, %1 \n"
5469+#endif
5470+ : "+r" (v->counter) : "Ir" (i));
5471+ raw_local_irq_restore(flags);
5472+ }
5473+}
5474+
5475+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5476 {
5477 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5478 int temp;
5479@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5480 *
5481 * Atomically subtracts @i from @v.
5482 */
5483-static __inline__ void atomic_sub(int i, atomic_t * v)
5484+static __inline__ void atomic_sub(int i, atomic_t *v)
5485+{
5486+ int temp;
5487+
5488+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5489+ __asm__ __volatile__(
5490+ " .set mips3 \n"
5491+ "1: ll %0, %1 # atomic64_sub \n"
5492+#ifdef CONFIG_PAX_REFCOUNT
5493+ /* Exception on overflow. */
5494+ "2: sub %0, %2 \n"
5495+#else
5496+ " subu %0, %2 \n"
5497+#endif
5498+ " sc %0, %1 \n"
5499+ " beqzl %0, 1b \n"
5500+#ifdef CONFIG_PAX_REFCOUNT
5501+ "3: \n"
5502+ _ASM_EXTABLE(2b, 3b)
5503+#endif
5504+ " .set mips0 \n"
5505+ : "=&r" (temp), "+m" (v->counter)
5506+ : "Ir" (i));
5507+ } else if (kernel_uses_llsc) {
5508+ __asm__ __volatile__(
5509+ " .set mips3 \n"
5510+ "1: ll %0, %1 # atomic64_sub \n"
5511+#ifdef CONFIG_PAX_REFCOUNT
5512+ /* Exception on overflow. */
5513+ "2: sub %0, %2 \n"
5514+#else
5515+ " subu %0, %2 \n"
5516+#endif
5517+ " sc %0, %1 \n"
5518+ " beqz %0, 1b \n"
5519+#ifdef CONFIG_PAX_REFCOUNT
5520+ "3: \n"
5521+ _ASM_EXTABLE(2b, 3b)
5522+#endif
5523+ " .set mips0 \n"
5524+ : "=&r" (temp), "+m" (v->counter)
5525+ : "Ir" (i));
5526+ } else {
5527+ unsigned long flags;
5528+
5529+ raw_local_irq_save(flags);
5530+ __asm__ __volatile__(
5531+#ifdef CONFIG_PAX_REFCOUNT
5532+ /* Exception on overflow. */
5533+ "1: sub %0, %1 \n"
5534+ "2: \n"
5535+ _ASM_EXTABLE(1b, 2b)
5536+#else
5537+ " subu %0, %1 \n"
5538+#endif
5539+ : "+r" (v->counter) : "Ir" (i));
5540+ raw_local_irq_restore(flags);
5541+ }
5542+}
5543+
5544+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
5545 {
5546 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5547 int temp;
5548@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5549 /*
5550 * Same as above, but return the result value
5551 */
5552-static __inline__ int atomic_add_return(int i, atomic_t * v)
5553+static __inline__ int atomic_add_return(int i, atomic_t *v)
5554+{
5555+ int result;
5556+ int temp;
5557+
5558+ smp_mb__before_llsc();
5559+
5560+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5561+ __asm__ __volatile__(
5562+ " .set mips3 \n"
5563+ "1: ll %1, %2 # atomic_add_return \n"
5564+#ifdef CONFIG_PAX_REFCOUNT
5565+ "2: add %0, %1, %3 \n"
5566+#else
5567+ " addu %0, %1, %3 \n"
5568+#endif
5569+ " sc %0, %2 \n"
5570+ " beqzl %0, 1b \n"
5571+#ifdef CONFIG_PAX_REFCOUNT
5572+ " b 4f \n"
5573+ " .set noreorder \n"
5574+ "3: b 5f \n"
5575+ " move %0, %1 \n"
5576+ " .set reorder \n"
5577+ _ASM_EXTABLE(2b, 3b)
5578+#endif
5579+ "4: addu %0, %1, %3 \n"
5580+#ifdef CONFIG_PAX_REFCOUNT
5581+ "5: \n"
5582+#endif
5583+ " .set mips0 \n"
5584+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5585+ : "Ir" (i));
5586+ } else if (kernel_uses_llsc) {
5587+ __asm__ __volatile__(
5588+ " .set mips3 \n"
5589+ "1: ll %1, %2 # atomic_add_return \n"
5590+#ifdef CONFIG_PAX_REFCOUNT
5591+ "2: add %0, %1, %3 \n"
5592+#else
5593+ " addu %0, %1, %3 \n"
5594+#endif
5595+ " sc %0, %2 \n"
5596+ " bnez %0, 4f \n"
5597+ " b 1b \n"
5598+#ifdef CONFIG_PAX_REFCOUNT
5599+ " .set noreorder \n"
5600+ "3: b 5f \n"
5601+ " move %0, %1 \n"
5602+ " .set reorder \n"
5603+ _ASM_EXTABLE(2b, 3b)
5604+#endif
5605+ "4: addu %0, %1, %3 \n"
5606+#ifdef CONFIG_PAX_REFCOUNT
5607+ "5: \n"
5608+#endif
5609+ " .set mips0 \n"
5610+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5611+ : "Ir" (i));
5612+ } else {
5613+ unsigned long flags;
5614+
5615+ raw_local_irq_save(flags);
5616+ __asm__ __volatile__(
5617+ " lw %0, %1 \n"
5618+#ifdef CONFIG_PAX_REFCOUNT
5619+ /* Exception on overflow. */
5620+ "1: add %0, %2 \n"
5621+#else
5622+ " addu %0, %2 \n"
5623+#endif
5624+ " sw %0, %1 \n"
5625+#ifdef CONFIG_PAX_REFCOUNT
5626+ /* Note: Dest reg is not modified on overflow */
5627+ "2: \n"
5628+ _ASM_EXTABLE(1b, 2b)
5629+#endif
5630+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5631+ raw_local_irq_restore(flags);
5632+ }
5633+
5634+ smp_llsc_mb();
5635+
5636+ return result;
5637+}
5638+
5639+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5640 {
5641 int result;
5642
5643@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5644 return result;
5645 }
5646
5647-static __inline__ int atomic_sub_return(int i, atomic_t * v)
5648+static __inline__ int atomic_sub_return(int i, atomic_t *v)
5649+{
5650+ int result;
5651+ int temp;
5652+
5653+ smp_mb__before_llsc();
5654+
5655+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5656+ __asm__ __volatile__(
5657+ " .set mips3 \n"
5658+ "1: ll %1, %2 # atomic_sub_return \n"
5659+#ifdef CONFIG_PAX_REFCOUNT
5660+ "2: sub %0, %1, %3 \n"
5661+#else
5662+ " subu %0, %1, %3 \n"
5663+#endif
5664+ " sc %0, %2 \n"
5665+ " beqzl %0, 1b \n"
5666+#ifdef CONFIG_PAX_REFCOUNT
5667+ " b 4f \n"
5668+ " .set noreorder \n"
5669+ "3: b 5f \n"
5670+ " move %0, %1 \n"
5671+ " .set reorder \n"
5672+ _ASM_EXTABLE(2b, 3b)
5673+#endif
5674+ "4: subu %0, %1, %3 \n"
5675+#ifdef CONFIG_PAX_REFCOUNT
5676+ "5: \n"
5677+#endif
5678+ " .set mips0 \n"
5679+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5680+ : "Ir" (i), "m" (v->counter)
5681+ : "memory");
5682+ } else if (kernel_uses_llsc) {
5683+ __asm__ __volatile__(
5684+ " .set mips3 \n"
5685+ "1: ll %1, %2 # atomic_sub_return \n"
5686+#ifdef CONFIG_PAX_REFCOUNT
5687+ "2: sub %0, %1, %3 \n"
5688+#else
5689+ " subu %0, %1, %3 \n"
5690+#endif
5691+ " sc %0, %2 \n"
5692+ " bnez %0, 4f \n"
5693+ " b 1b \n"
5694+#ifdef CONFIG_PAX_REFCOUNT
5695+ " .set noreorder \n"
5696+ "3: b 5f \n"
5697+ " move %0, %1 \n"
5698+ " .set reorder \n"
5699+ _ASM_EXTABLE(2b, 3b)
5700+#endif
5701+ "4: subu %0, %1, %3 \n"
5702+#ifdef CONFIG_PAX_REFCOUNT
5703+ "5: \n"
5704+#endif
5705+ " .set mips0 \n"
5706+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5707+ : "Ir" (i));
5708+ } else {
5709+ unsigned long flags;
5710+
5711+ raw_local_irq_save(flags);
5712+ __asm__ __volatile__(
5713+ " lw %0, %1 \n"
5714+#ifdef CONFIG_PAX_REFCOUNT
5715+ /* Exception on overflow. */
5716+ "1: sub %0, %2 \n"
5717+#else
5718+ " subu %0, %2 \n"
5719+#endif
5720+ " sw %0, %1 \n"
5721+#ifdef CONFIG_PAX_REFCOUNT
5722+ /* Note: Dest reg is not modified on overflow */
5723+ "2: \n"
5724+ _ASM_EXTABLE(1b, 2b)
5725+#endif
5726+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5727+ raw_local_irq_restore(flags);
5728+ }
5729+
5730+ smp_llsc_mb();
5731+
5732+ return result;
5733+}
5734+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5735 {
5736 int result;
5737
5738@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5739 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5740 * The function returns the old value of @v minus @i.
5741 */
5742-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5743+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5744 {
5745 int result;
5746
5747@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5748 return result;
5749 }
5750
5751-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5752-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5753+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5754+{
5755+ return cmpxchg(&v->counter, old, new);
5756+}
5757+
5758+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5759+ int new)
5760+{
5761+ return cmpxchg(&(v->counter), old, new);
5762+}
5763+
5764+static inline int atomic_xchg(atomic_t *v, int new)
5765+{
5766+ return xchg(&v->counter, new);
5767+}
5768+
5769+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5770+{
5771+ return xchg(&(v->counter), new);
5772+}
5773
5774 /**
5775 * __atomic_add_unless - add unless the number is a given value
5776@@ -324,6 +666,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5777
5778 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5779 #define atomic_inc_return(v) atomic_add_return(1, (v))
5780+#define atomic_inc_return_unchecked(v) atomic_add_return_unchecked(1, (v))
5781
5782 /*
5783 * atomic_sub_and_test - subtract value from variable and test result
5784@@ -345,6 +688,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5785 * other cases.
5786 */
5787 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5788+#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, (v)) == 0)
5789
5790 /*
5791 * atomic_dec_and_test - decrement by 1 and test
5792@@ -369,6 +713,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5793 * Atomically increments @v by 1.
5794 */
5795 #define atomic_inc(v) atomic_add(1, (v))
5796+#define atomic_inc_unchecked(v) atomic_add_unchecked(1, (v))
5797
5798 /*
5799 * atomic_dec - decrement and test
5800@@ -377,6 +722,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5801 * Atomically decrements @v by 1.
5802 */
5803 #define atomic_dec(v) atomic_sub(1, (v))
5804+#define atomic_dec_unchecked(v) atomic_sub_return_unchecked(1, (v))
5805
5806 /*
5807 * atomic_add_negative - add and test if negative
5808@@ -398,14 +744,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5809 * @v: pointer of type atomic64_t
5810 *
5811 */
5812-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5813+static inline long atomic64_read(const atomic64_t *v)
5814+{
5815+ return (*(volatile const long *) &v->counter);
5816+}
5817+
5818+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5819+{
5820+ return (*(volatile const long *) &v->counter);
5821+}
5822
5823 /*
5824 * atomic64_set - set atomic variable
5825 * @v: pointer of type atomic64_t
5826 * @i: required value
5827 */
5828-#define atomic64_set(v, i) ((v)->counter = (i))
5829+static inline void atomic64_set(atomic64_t *v, long i)
5830+{
5831+ v->counter = i;
5832+}
5833+
5834+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5835+{
5836+ v->counter = i;
5837+}
5838
5839 /*
5840 * atomic64_add - add integer to atomic variable
5841@@ -414,7 +776,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5842 *
5843 * Atomically adds @i to @v.
5844 */
5845-static __inline__ void atomic64_add(long i, atomic64_t * v)
5846+static __inline__ void atomic64_add(long i, atomic64_t *v)
5847+{
5848+ long temp;
5849+
5850+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5851+ __asm__ __volatile__(
5852+ " .set mips3 \n"
5853+ "1: lld %0, %1 # atomic64_add \n"
5854+#ifdef CONFIG_PAX_REFCOUNT
5855+ /* Exception on overflow. */
5856+ "2: dadd %0, %2 \n"
5857+#else
5858+ " daddu %0, %2 \n"
5859+#endif
5860+ " scd %0, %1 \n"
5861+ " beqzl %0, 1b \n"
5862+#ifdef CONFIG_PAX_REFCOUNT
5863+ "3: \n"
5864+ _ASM_EXTABLE(2b, 3b)
5865+#endif
5866+ " .set mips0 \n"
5867+ : "=&r" (temp), "+m" (v->counter)
5868+ : "Ir" (i));
5869+ } else if (kernel_uses_llsc) {
5870+ __asm__ __volatile__(
5871+ " .set mips3 \n"
5872+ "1: lld %0, %1 # atomic64_add \n"
5873+#ifdef CONFIG_PAX_REFCOUNT
5874+ /* Exception on overflow. */
5875+ "2: dadd %0, %2 \n"
5876+#else
5877+ " daddu %0, %2 \n"
5878+#endif
5879+ " scd %0, %1 \n"
5880+ " beqz %0, 1b \n"
5881+#ifdef CONFIG_PAX_REFCOUNT
5882+ "3: \n"
5883+ _ASM_EXTABLE(2b, 3b)
5884+#endif
5885+ " .set mips0 \n"
5886+ : "=&r" (temp), "+m" (v->counter)
5887+ : "Ir" (i));
5888+ } else {
5889+ unsigned long flags;
5890+
5891+ raw_local_irq_save(flags);
5892+ __asm__ __volatile__(
5893+#ifdef CONFIG_PAX_REFCOUNT
5894+ /* Exception on overflow. */
5895+ "1: dadd %0, %1 \n"
5896+ "2: \n"
5897+ _ASM_EXTABLE(1b, 2b)
5898+#else
5899+ " daddu %0, %1 \n"
5900+#endif
5901+ : "+r" (v->counter) : "Ir" (i));
5902+ raw_local_irq_restore(flags);
5903+ }
5904+}
5905+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
5906 {
5907 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5908 long temp;
5909@@ -457,7 +878,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
5910 *
5911 * Atomically subtracts @i from @v.
5912 */
5913-static __inline__ void atomic64_sub(long i, atomic64_t * v)
5914+static __inline__ void atomic64_sub(long i, atomic64_t *v)
5915+{
5916+ long temp;
5917+
5918+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5919+ __asm__ __volatile__(
5920+ " .set mips3 \n"
5921+ "1: lld %0, %1 # atomic64_sub \n"
5922+#ifdef CONFIG_PAX_REFCOUNT
5923+ /* Exception on overflow. */
5924+ "2: dsub %0, %2 \n"
5925+#else
5926+ " dsubu %0, %2 \n"
5927+#endif
5928+ " scd %0, %1 \n"
5929+ " beqzl %0, 1b \n"
5930+#ifdef CONFIG_PAX_REFCOUNT
5931+ "3: \n"
5932+ _ASM_EXTABLE(2b, 3b)
5933+#endif
5934+ " .set mips0 \n"
5935+ : "=&r" (temp), "+m" (v->counter)
5936+ : "Ir" (i));
5937+ } else if (kernel_uses_llsc) {
5938+ __asm__ __volatile__(
5939+ " .set mips3 \n"
5940+ "1: lld %0, %1 # atomic64_sub \n"
5941+#ifdef CONFIG_PAX_REFCOUNT
5942+ /* Exception on overflow. */
5943+ "2: dsub %0, %2 \n"
5944+#else
5945+ " dsubu %0, %2 \n"
5946+#endif
5947+ " scd %0, %1 \n"
5948+ " beqz %0, 1b \n"
5949+#ifdef CONFIG_PAX_REFCOUNT
5950+ "3: \n"
5951+ _ASM_EXTABLE(2b, 3b)
5952+#endif
5953+ " .set mips0 \n"
5954+ : "=&r" (temp), "+m" (v->counter)
5955+ : "Ir" (i));
5956+ } else {
5957+ unsigned long flags;
5958+
5959+ raw_local_irq_save(flags);
5960+ __asm__ __volatile__(
5961+#ifdef CONFIG_PAX_REFCOUNT
5962+ /* Exception on overflow. */
5963+ "1: dsub %0, %1 \n"
5964+ "2: \n"
5965+ _ASM_EXTABLE(1b, 2b)
5966+#else
5967+ " dsubu %0, %1 \n"
5968+#endif
5969+ : "+r" (v->counter) : "Ir" (i));
5970+ raw_local_irq_restore(flags);
5971+ }
5972+}
5973+
5974+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
5975 {
5976 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5977 long temp;
5978@@ -496,7 +977,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
5979 /*
5980 * Same as above, but return the result value
5981 */
5982-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
5983+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
5984+{
5985+ long result;
5986+ long temp;
5987+
5988+ smp_mb__before_llsc();
5989+
5990+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5991+ __asm__ __volatile__(
5992+ " .set mips3 \n"
5993+ "1: lld %1, %2 # atomic64_add_return \n"
5994+#ifdef CONFIG_PAX_REFCOUNT
5995+ "2: dadd %0, %1, %3 \n"
5996+#else
5997+ " daddu %0, %1, %3 \n"
5998+#endif
5999+ " scd %0, %2 \n"
6000+ " beqzl %0, 1b \n"
6001+#ifdef CONFIG_PAX_REFCOUNT
6002+ " b 4f \n"
6003+ " .set noreorder \n"
6004+ "3: b 5f \n"
6005+ " move %0, %1 \n"
6006+ " .set reorder \n"
6007+ _ASM_EXTABLE(2b, 3b)
6008+#endif
6009+ "4: daddu %0, %1, %3 \n"
6010+#ifdef CONFIG_PAX_REFCOUNT
6011+ "5: \n"
6012+#endif
6013+ " .set mips0 \n"
6014+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6015+ : "Ir" (i));
6016+ } else if (kernel_uses_llsc) {
6017+ __asm__ __volatile__(
6018+ " .set mips3 \n"
6019+ "1: lld %1, %2 # atomic64_add_return \n"
6020+#ifdef CONFIG_PAX_REFCOUNT
6021+ "2: dadd %0, %1, %3 \n"
6022+#else
6023+ " daddu %0, %1, %3 \n"
6024+#endif
6025+ " scd %0, %2 \n"
6026+ " bnez %0, 4f \n"
6027+ " b 1b \n"
6028+#ifdef CONFIG_PAX_REFCOUNT
6029+ " .set noreorder \n"
6030+ "3: b 5f \n"
6031+ " move %0, %1 \n"
6032+ " .set reorder \n"
6033+ _ASM_EXTABLE(2b, 3b)
6034+#endif
6035+ "4: daddu %0, %1, %3 \n"
6036+#ifdef CONFIG_PAX_REFCOUNT
6037+ "5: \n"
6038+#endif
6039+ " .set mips0 \n"
6040+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6041+ : "Ir" (i), "m" (v->counter)
6042+ : "memory");
6043+ } else {
6044+ unsigned long flags;
6045+
6046+ raw_local_irq_save(flags);
6047+ __asm__ __volatile__(
6048+ " ld %0, %1 \n"
6049+#ifdef CONFIG_PAX_REFCOUNT
6050+ /* Exception on overflow. */
6051+ "1: dadd %0, %2 \n"
6052+#else
6053+ " daddu %0, %2 \n"
6054+#endif
6055+ " sd %0, %1 \n"
6056+#ifdef CONFIG_PAX_REFCOUNT
6057+ /* Note: Dest reg is not modified on overflow */
6058+ "2: \n"
6059+ _ASM_EXTABLE(1b, 2b)
6060+#endif
6061+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6062+ raw_local_irq_restore(flags);
6063+ }
6064+
6065+ smp_llsc_mb();
6066+
6067+ return result;
6068+}
6069+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6070 {
6071 long result;
6072
6073@@ -546,7 +1113,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6074 return result;
6075 }
6076
6077-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6078+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6079+{
6080+ long result;
6081+ long temp;
6082+
6083+ smp_mb__before_llsc();
6084+
6085+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6086+ long temp;
6087+
6088+ __asm__ __volatile__(
6089+ " .set mips3 \n"
6090+ "1: lld %1, %2 # atomic64_sub_return \n"
6091+#ifdef CONFIG_PAX_REFCOUNT
6092+ "2: dsub %0, %1, %3 \n"
6093+#else
6094+ " dsubu %0, %1, %3 \n"
6095+#endif
6096+ " scd %0, %2 \n"
6097+ " beqzl %0, 1b \n"
6098+#ifdef CONFIG_PAX_REFCOUNT
6099+ " b 4f \n"
6100+ " .set noreorder \n"
6101+ "3: b 5f \n"
6102+ " move %0, %1 \n"
6103+ " .set reorder \n"
6104+ _ASM_EXTABLE(2b, 3b)
6105+#endif
6106+ "4: dsubu %0, %1, %3 \n"
6107+#ifdef CONFIG_PAX_REFCOUNT
6108+ "5: \n"
6109+#endif
6110+ " .set mips0 \n"
6111+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6112+ : "Ir" (i), "m" (v->counter)
6113+ : "memory");
6114+ } else if (kernel_uses_llsc) {
6115+ __asm__ __volatile__(
6116+ " .set mips3 \n"
6117+ "1: lld %1, %2 # atomic64_sub_return \n"
6118+#ifdef CONFIG_PAX_REFCOUNT
6119+ "2: dsub %0, %1, %3 \n"
6120+#else
6121+ " dsubu %0, %1, %3 \n"
6122+#endif
6123+ " scd %0, %2 \n"
6124+ " bnez %0, 4f \n"
6125+ " b 1b \n"
6126+#ifdef CONFIG_PAX_REFCOUNT
6127+ " .set noreorder \n"
6128+ "3: b 5f \n"
6129+ " move %0, %1 \n"
6130+ " .set reorder \n"
6131+ _ASM_EXTABLE(2b, 3b)
6132+#endif
6133+ "4: dsubu %0, %1, %3 \n"
6134+#ifdef CONFIG_PAX_REFCOUNT
6135+ "5: \n"
6136+#endif
6137+ " .set mips0 \n"
6138+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6139+ : "Ir" (i), "m" (v->counter)
6140+ : "memory");
6141+ } else {
6142+ unsigned long flags;
6143+
6144+ raw_local_irq_save(flags);
6145+ __asm__ __volatile__(
6146+ " ld %0, %1 \n"
6147+#ifdef CONFIG_PAX_REFCOUNT
6148+ /* Exception on overflow. */
6149+ "1: dsub %0, %2 \n"
6150+#else
6151+ " dsubu %0, %2 \n"
6152+#endif
6153+ " sd %0, %1 \n"
6154+#ifdef CONFIG_PAX_REFCOUNT
6155+ /* Note: Dest reg is not modified on overflow */
6156+ "2: \n"
6157+ _ASM_EXTABLE(1b, 2b)
6158+#endif
6159+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6160+ raw_local_irq_restore(flags);
6161+ }
6162+
6163+ smp_llsc_mb();
6164+
6165+ return result;
6166+}
6167+
6168+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6169 {
6170 long result;
6171
6172@@ -605,7 +1262,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6173 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6174 * The function returns the old value of @v minus @i.
6175 */
6176-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6177+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6178 {
6179 long result;
6180
6181@@ -662,9 +1319,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6182 return result;
6183 }
6184
6185-#define atomic64_cmpxchg(v, o, n) \
6186- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6187-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6188+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6189+{
6190+ return cmpxchg(&v->counter, old, new);
6191+}
6192+
6193+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6194+ long new)
6195+{
6196+ return cmpxchg(&(v->counter), old, new);
6197+}
6198+
6199+static inline long atomic64_xchg(atomic64_t *v, long new)
6200+{
6201+ return xchg(&v->counter, new);
6202+}
6203+
6204+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6205+{
6206+ return xchg(&(v->counter), new);
6207+}
6208
6209 /**
6210 * atomic64_add_unless - add unless the number is a given value
6211@@ -694,6 +1368,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6212
6213 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6214 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6215+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6216
6217 /*
6218 * atomic64_sub_and_test - subtract value from variable and test result
6219@@ -715,6 +1390,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6220 * other cases.
6221 */
6222 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6223+#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
6224
6225 /*
6226 * atomic64_dec_and_test - decrement by 1 and test
6227@@ -739,6 +1415,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6228 * Atomically increments @v by 1.
6229 */
6230 #define atomic64_inc(v) atomic64_add(1, (v))
6231+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6232
6233 /*
6234 * atomic64_dec - decrement and test
6235@@ -747,6 +1424,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6236 * Atomically decrements @v by 1.
6237 */
6238 #define atomic64_dec(v) atomic64_sub(1, (v))
6239+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
bb5f0bf8
AF
6240
6241 /*
e2b79cd1 6242 * atomic64_add_negative - add and test if negative
bb5f0bf8
AF
6243diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6244index b4db69f..8f3b093 100644
6245--- a/arch/mips/include/asm/cache.h
6246+++ b/arch/mips/include/asm/cache.h
6247@@ -9,10 +9,11 @@
6248 #ifndef _ASM_CACHE_H
6249 #define _ASM_CACHE_H
6250
6251+#include <linux/const.h>
6252 #include <kmalloc.h>
6253
6254 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6255-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6256+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6257
6258 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6259 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6260diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6261index cf3ae24..238d22f 100644
6262--- a/arch/mips/include/asm/elf.h
6263+++ b/arch/mips/include/asm/elf.h
6264@@ -372,13 +372,16 @@ extern const char *__elf_platform;
6265 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6266 #endif
6267
6268+#ifdef CONFIG_PAX_ASLR
6269+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6270+
6271+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6272+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6273+#endif
6274+
6275 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6276 struct linux_binprm;
6277 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6278 int uses_interp);
6279
6280-struct mm_struct;
6281-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6282-#define arch_randomize_brk arch_randomize_brk
6283-
6284 #endif /* _ASM_ELF_H */
6285diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6286index c1f6afa..38cc6e9 100644
6287--- a/arch/mips/include/asm/exec.h
6288+++ b/arch/mips/include/asm/exec.h
6289@@ -12,6 +12,6 @@
6290 #ifndef _ASM_EXEC_H
6291 #define _ASM_EXEC_H
6292
6293-extern unsigned long arch_align_stack(unsigned long sp);
6294+#define arch_align_stack(x) ((x) & ~0xfUL)
6295
6296 #endif /* _ASM_EXEC_H */
6297diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6298index d44622c..64990d2 100644
6299--- a/arch/mips/include/asm/local.h
6300+++ b/arch/mips/include/asm/local.h
6301@@ -12,15 +12,25 @@ typedef struct
6302 atomic_long_t a;
6303 } local_t;
6304
6305+typedef struct {
6306+ atomic_long_unchecked_t a;
6307+} local_unchecked_t;
6308+
6309 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6310
6311 #define local_read(l) atomic_long_read(&(l)->a)
6312+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6313 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6314+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6315
6316 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6317+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6318 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6319+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6320 #define local_inc(l) atomic_long_inc(&(l)->a)
6321+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6322 #define local_dec(l) atomic_long_dec(&(l)->a)
6323+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6324
6325 /*
6326 * Same as above, but return the result value
6327@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6328 return result;
6329 }
6330
6331+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6332+{
6333+ unsigned long result;
6334+
6335+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6336+ unsigned long temp;
6337+
6338+ __asm__ __volatile__(
6339+ " .set mips3 \n"
6340+ "1:" __LL "%1, %2 # local_add_return \n"
6341+ " addu %0, %1, %3 \n"
6342+ __SC "%0, %2 \n"
6343+ " beqzl %0, 1b \n"
6344+ " addu %0, %1, %3 \n"
6345+ " .set mips0 \n"
6346+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6347+ : "Ir" (i), "m" (l->a.counter)
6348+ : "memory");
6349+ } else if (kernel_uses_llsc) {
6350+ unsigned long temp;
6351+
6352+ __asm__ __volatile__(
6353+ " .set mips3 \n"
6354+ "1:" __LL "%1, %2 # local_add_return \n"
6355+ " addu %0, %1, %3 \n"
6356+ __SC "%0, %2 \n"
6357+ " beqz %0, 1b \n"
6358+ " addu %0, %1, %3 \n"
6359+ " .set mips0 \n"
6360+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6361+ : "Ir" (i), "m" (l->a.counter)
6362+ : "memory");
6363+ } else {
6364+ unsigned long flags;
6365+
6366+ local_irq_save(flags);
6367+ result = l->a.counter;
6368+ result += i;
6369+ l->a.counter = result;
6370+ local_irq_restore(flags);
6371+ }
6372+
6373+ return result;
6374+}
6375+
6376 static __inline__ long local_sub_return(long i, local_t * l)
6377 {
6378 unsigned long result;
6379@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6380
6381 #define local_cmpxchg(l, o, n) \
6382 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6383+#define local_cmpxchg_unchecked(l, o, n) \
6384+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6385 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6386
6387 /**
6388diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6389index f59552f..3abe9b9 100644
6390--- a/arch/mips/include/asm/page.h
6391+++ b/arch/mips/include/asm/page.h
6392@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6393 #ifdef CONFIG_CPU_MIPS32
6394 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6395 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6396- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6397+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6398 #else
6399 typedef struct { unsigned long long pte; } pte_t;
6400 #define pte_val(x) ((x).pte)
6401diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6402index 881d18b..cea38bc 100644
6403--- a/arch/mips/include/asm/pgalloc.h
6404+++ b/arch/mips/include/asm/pgalloc.h
6405@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6406 {
6407 set_pud(pud, __pud((unsigned long)pmd));
6408 }
6409+
6410+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6411+{
6412+ pud_populate(mm, pud, pmd);
6413+}
6414 #endif
6415
6416 /*
6417diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6418index 895320e..bf63e10 100644
6419--- a/arch/mips/include/asm/thread_info.h
6420+++ b/arch/mips/include/asm/thread_info.h
6421@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
6422 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
6423 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6424 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6425+/* li takes a 32bit immediate */
6426+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6427 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6428
6429 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6430@@ -130,15 +132,18 @@ static inline struct thread_info *current_thread_info(void)
6431 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
6432 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6433 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6434+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6435+
6436+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6437
6438 /* work to do in syscall_trace_leave() */
6439-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
6440+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6441
6442 /* work to do on interrupt/exception return */
6443 #define _TIF_WORK_MASK \
6444 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
6445 /* work to do on any return to u-space */
6446-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
6447+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
6448
6449 #endif /* __KERNEL__ */
6450
6451diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6452index 1188e00..41cf144 100644
6453--- a/arch/mips/kernel/binfmt_elfn32.c
6454+++ b/arch/mips/kernel/binfmt_elfn32.c
6455@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6456 #undef ELF_ET_DYN_BASE
6457 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6458
6459+#ifdef CONFIG_PAX_ASLR
6460+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6461+
6462+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6463+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6464+#endif
6465+
6466 #include <asm/processor.h>
6467 #include <linux/module.h>
6468 #include <linux/elfcore.h>
6469diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6470index 202e581..689ca79 100644
6471--- a/arch/mips/kernel/binfmt_elfo32.c
6472+++ b/arch/mips/kernel/binfmt_elfo32.c
6473@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6474 #undef ELF_ET_DYN_BASE
6475 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6476
6477+#ifdef CONFIG_PAX_ASLR
6478+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6479+
6480+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6481+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6482+#endif
6483+
6484 #include <asm/processor.h>
6485
6486 /*
6487diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6488index c6a041d..b3e7318 100644
6489--- a/arch/mips/kernel/process.c
6490+++ b/arch/mips/kernel/process.c
6491@@ -563,15 +563,3 @@ unsigned long get_wchan(struct task_struct *task)
6492 out:
6493 return pc;
6494 }
6495-
6496-/*
6497- * Don't forget that the stack pointer must be aligned on a 8 bytes
6498- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6499- */
6500-unsigned long arch_align_stack(unsigned long sp)
6501-{
6502- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6503- sp -= get_random_int() & ~PAGE_MASK;
6504-
6505- return sp & ALMASK;
6506-}
6507diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6508index 9c6299c..2fb4c22 100644
6509--- a/arch/mips/kernel/ptrace.c
6510+++ b/arch/mips/kernel/ptrace.c
6511@@ -528,6 +528,10 @@ static inline int audit_arch(void)
6512 return arch;
6513 }
6514
6515+#ifdef CONFIG_GRKERNSEC_SETXID
6516+extern void gr_delayed_cred_worker(void);
6517+#endif
6518+
6519 /*
6520 * Notification of system call entry/exit
6521 * - triggered by current->work.syscall_trace
6522@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6523 /* do the secure computing check first */
6524 secure_computing_strict(regs->regs[2]);
6525
6526+#ifdef CONFIG_GRKERNSEC_SETXID
6527+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6528+ gr_delayed_cred_worker();
6529+#endif
6530+
6531 if (!(current->ptrace & PT_PTRACED))
6532 goto out;
6533
6534diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
6535index 9b36424..e7f4154 100644
6536--- a/arch/mips/kernel/scall32-o32.S
6537+++ b/arch/mips/kernel/scall32-o32.S
6538@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
6539
6540 stack_done:
6541 lw t0, TI_FLAGS($28) # syscall tracing enabled?
6542- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6543+ li t1, _TIF_SYSCALL_WORK
6544 and t0, t1
6545 bnez t0, syscall_trace_entry # -> yes
6546
6547diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
6548index 97a5909..59622f8 100644
6549--- a/arch/mips/kernel/scall64-64.S
6550+++ b/arch/mips/kernel/scall64-64.S
6551@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
6552
6553 sd a3, PT_R26(sp) # save a3 for syscall restarting
6554
6555- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6556+ li t1, _TIF_SYSCALL_WORK
6557 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6558 and t0, t1, t0
6559 bnez t0, syscall_trace_entry
6560diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
6561index edcb659..fb2ab09 100644
6562--- a/arch/mips/kernel/scall64-n32.S
6563+++ b/arch/mips/kernel/scall64-n32.S
6564@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
6565
6566 sd a3, PT_R26(sp) # save a3 for syscall restarting
6567
6568- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6569+ li t1, _TIF_SYSCALL_WORK
6570 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6571 and t0, t1, t0
6572 bnez t0, n32_syscall_trace_entry
6573diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
6574index 74f485d..47d2c38 100644
6575--- a/arch/mips/kernel/scall64-o32.S
6576+++ b/arch/mips/kernel/scall64-o32.S
6577@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
6578 PTR 4b, bad_stack
6579 .previous
6580
6581- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6582+ li t1, _TIF_SYSCALL_WORK
6583 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6584 and t0, t1, t0
6585 bnez t0, trace_a_syscall
e2b79cd1
AF
6586diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6587index a75ae40..0d0f56a 100644
6588--- a/arch/mips/kernel/traps.c
6589+++ b/arch/mips/kernel/traps.c
6590@@ -675,7 +675,17 @@ asmlinkage void do_ov(struct pt_regs *regs)
6591 {
6592 siginfo_t info;
6593
6594- die_if_kernel("Integer overflow", regs);
6595+ if (unlikely(!user_mode(regs))) {
6596+
6597+#ifdef CONFIG_PAX_REFCOUNT
6598+ if (fixup_exception(regs)) {
6599+ pax_report_refcount_overflow(regs);
6600+ return;
6601+ }
6602+#endif
6603+
6604+ die("Integer overflow", regs);
6605+ }
6606
6607 info.si_code = FPE_INTOVF;
6608 info.si_signo = SIGFPE;
bb5f0bf8
AF
6609diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6610index 0fead53..eeb00a6 100644
6611--- a/arch/mips/mm/fault.c
6612+++ b/arch/mips/mm/fault.c
6613@@ -27,6 +27,23 @@
6614 #include <asm/highmem.h> /* For VMALLOC_END */
6615 #include <linux/kdebug.h>
6616
6617+#ifdef CONFIG_PAX_PAGEEXEC
6618+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6619+{
6620+ unsigned long i;
6621+
6622+ printk(KERN_ERR "PAX: bytes at PC: ");
6623+ for (i = 0; i < 5; i++) {
6624+ unsigned int c;
6625+ if (get_user(c, (unsigned int *)pc+i))
6626+ printk(KERN_CONT "???????? ");
6627+ else
6628+ printk(KERN_CONT "%08x ", c);
6629+ }
6630+ printk("\n");
6631+}
6632+#endif
6633+
6634 /*
6635 * This routine handles page faults. It determines the address,
6636 * and the problem, and then passes it off to one of the appropriate
6637@@ -196,6 +213,14 @@ bad_area:
6638 bad_area_nosemaphore:
6639 /* User mode accesses just cause a SIGSEGV */
6640 if (user_mode(regs)) {
6641+
6642+#ifdef CONFIG_PAX_PAGEEXEC
6643+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6644+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6645+ do_group_exit(SIGKILL);
6646+ }
6647+#endif
6648+
6649 tsk->thread.cp0_badvaddr = address;
6650 tsk->thread.error_code = write;
6651 #if 0
6652diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6653index 7e5fe27..9656513 100644
6654--- a/arch/mips/mm/mmap.c
6655+++ b/arch/mips/mm/mmap.c
6656@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6657 struct vm_area_struct *vma;
6658 unsigned long addr = addr0;
6659 int do_color_align;
6660+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6661 struct vm_unmapped_area_info info;
6662
6663 if (unlikely(len > TASK_SIZE))
6664@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6665 do_color_align = 1;
6666
6667 /* requesting a specific address */
6668+
6669+#ifdef CONFIG_PAX_RANDMMAP
6670+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6671+#endif
6672+
6673 if (addr) {
6674 if (do_color_align)
6675 addr = COLOUR_ALIGN(addr, pgoff);
6676@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6677 addr = PAGE_ALIGN(addr);
6678
6679 vma = find_vma(mm, addr);
6680- if (TASK_SIZE - len >= addr &&
6681- (!vma || addr + len <= vma->vm_start))
6682+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset))
6683 return addr;
6684 }
6685
6686 info.length = len;
6687 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6688 info.align_offset = pgoff << PAGE_SHIFT;
6689+ info.threadstack_offset = offset;
6690
6691 if (dir == DOWN) {
6692 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6693@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6694 {
6695 unsigned long random_factor = 0UL;
6696
6697+#ifdef CONFIG_PAX_RANDMMAP
6698+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6699+#endif
6700+
6701 if (current->flags & PF_RANDOMIZE) {
6702 random_factor = get_random_int();
6703 random_factor = random_factor << PAGE_SHIFT;
6704@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6705
6706 if (mmap_is_legacy()) {
6707 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6708+
6709+#ifdef CONFIG_PAX_RANDMMAP
6710+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6711+ mm->mmap_base += mm->delta_mmap;
6712+#endif
6713+
6714 mm->get_unmapped_area = arch_get_unmapped_area;
6715 mm->unmap_area = arch_unmap_area;
6716 } else {
6717 mm->mmap_base = mmap_base(random_factor);
6718+
6719+#ifdef CONFIG_PAX_RANDMMAP
6720+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6721+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6722+#endif
6723+
6724 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6725 mm->unmap_area = arch_unmap_area_topdown;
6726 }
6727 }
6728
6729-static inline unsigned long brk_rnd(void)
6730-{
6731- unsigned long rnd = get_random_int();
6732-
6733- rnd = rnd << PAGE_SHIFT;
6734- /* 8MB for 32bit, 256MB for 64bit */
6735- if (TASK_IS_32BIT_ADDR)
6736- rnd = rnd & 0x7ffffful;
6737- else
6738- rnd = rnd & 0xffffffful;
6739-
6740- return rnd;
6741-}
6742-
6743-unsigned long arch_randomize_brk(struct mm_struct *mm)
6744-{
6745- unsigned long base = mm->brk;
6746- unsigned long ret;
6747-
6748- ret = PAGE_ALIGN(base + brk_rnd());
6749-
6750- if (ret < mm->brk)
6751- return mm->brk;
6752-
6753- return ret;
6754-}
6755-
6756 int __virt_addr_valid(const volatile void *kaddr)
6757 {
6758 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
6759diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6760index 967d144..db12197 100644
6761--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
6762+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6763@@ -11,12 +11,14 @@
6764 #ifndef _ASM_PROC_CACHE_H
6765 #define _ASM_PROC_CACHE_H
6766
6767+#include <linux/const.h>
6768+
6769 /* L1 cache */
6770
6771 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6772 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
6773-#define L1_CACHE_BYTES 16 /* bytes per entry */
6774 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
6775+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6776 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
6777
6778 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6779diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6780index bcb5df2..84fabd2 100644
6781--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6782+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6783@@ -16,13 +16,15 @@
6784 #ifndef _ASM_PROC_CACHE_H
6785 #define _ASM_PROC_CACHE_H
6786
6787+#include <linux/const.h>
6788+
6789 /*
6790 * L1 cache
6791 */
6792 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6793 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
6794-#define L1_CACHE_BYTES 32 /* bytes per entry */
6795 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
6796+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6797 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
6798
6799 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6800diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
6801index 4ce7a01..449202a 100644
6802--- a/arch/openrisc/include/asm/cache.h
6803+++ b/arch/openrisc/include/asm/cache.h
6804@@ -19,11 +19,13 @@
6805 #ifndef __ASM_OPENRISC_CACHE_H
6806 #define __ASM_OPENRISC_CACHE_H
6807
6808+#include <linux/const.h>
6809+
6810 /* FIXME: How can we replace these with values from the CPU...
6811 * they shouldn't be hard-coded!
6812 */
6813
6814-#define L1_CACHE_BYTES 16
6815 #define L1_CACHE_SHIFT 4
6816+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6817
6818 #endif /* __ASM_OPENRISC_CACHE_H */
6819diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
6820index 472886c..00e7df9 100644
6821--- a/arch/parisc/include/asm/atomic.h
6822+++ b/arch/parisc/include/asm/atomic.h
6823@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
6824 return dec;
6825 }
6826
6827+#define atomic64_read_unchecked(v) atomic64_read(v)
6828+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6829+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6830+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6831+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6832+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6833+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6834+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6835+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6836+
6837 #endif /* !CONFIG_64BIT */
6838
6839
6840diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
6841index 47f11c7..3420df2 100644
6842--- a/arch/parisc/include/asm/cache.h
6843+++ b/arch/parisc/include/asm/cache.h
6844@@ -5,6 +5,7 @@
6845 #ifndef __ARCH_PARISC_CACHE_H
6846 #define __ARCH_PARISC_CACHE_H
6847
6848+#include <linux/const.h>
6849
6850 /*
6851 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
6852@@ -15,13 +16,13 @@
6853 * just ruin performance.
6854 */
6855 #ifdef CONFIG_PA20
6856-#define L1_CACHE_BYTES 64
6857 #define L1_CACHE_SHIFT 6
6858 #else
6859-#define L1_CACHE_BYTES 32
6860 #define L1_CACHE_SHIFT 5
6861 #endif
6862
6863+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6864+
6865 #ifndef __ASSEMBLY__
6866
6867 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6868diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
6869index ad2b503..bdf1651 100644
6870--- a/arch/parisc/include/asm/elf.h
6871+++ b/arch/parisc/include/asm/elf.h
6872@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
6873
6874 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
6875
6876+#ifdef CONFIG_PAX_ASLR
6877+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6878+
6879+#define PAX_DELTA_MMAP_LEN 16
6880+#define PAX_DELTA_STACK_LEN 16
6881+#endif
6882+
6883 /* This yields a mask that user programs can use to figure out what
6884 instruction set this CPU supports. This could be done in user space,
6885 but it's not easy, and we've already done it here. */
6886diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
6887index fc987a1..6e068ef 100644
6888--- a/arch/parisc/include/asm/pgalloc.h
6889+++ b/arch/parisc/include/asm/pgalloc.h
6890@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6891 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
6892 }
6893
6894+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6895+{
6896+ pgd_populate(mm, pgd, pmd);
6897+}
6898+
6899 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
6900 {
6901 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
6902@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
6903 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
6904 #define pmd_free(mm, x) do { } while (0)
6905 #define pgd_populate(mm, pmd, pte) BUG()
6906+#define pgd_populate_kernel(mm, pmd, pte) BUG()
6907
6908 #endif
6909
6910diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
6911index 1e40d7f..a3eb445 100644
6912--- a/arch/parisc/include/asm/pgtable.h
6913+++ b/arch/parisc/include/asm/pgtable.h
6914@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6915 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6916 #define PAGE_COPY PAGE_EXECREAD
6917 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6918+
6919+#ifdef CONFIG_PAX_PAGEEXEC
6920+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6921+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6922+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6923+#else
6924+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6925+# define PAGE_COPY_NOEXEC PAGE_COPY
6926+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6927+#endif
6928+
6929 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6930 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6931 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6932diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6933index e0a8235..ce2f1e1 100644
6934--- a/arch/parisc/include/asm/uaccess.h
6935+++ b/arch/parisc/include/asm/uaccess.h
6936@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6937 const void __user *from,
6938 unsigned long n)
6939 {
6940- int sz = __compiletime_object_size(to);
6941+ size_t sz = __compiletime_object_size(to);
6942 int ret = -EFAULT;
6943
6944- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6945+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6946 ret = __copy_from_user(to, from, n);
6947 else
6948 copy_from_user_overflow();
6949diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6950index 2a625fb..9908930 100644
6951--- a/arch/parisc/kernel/module.c
6952+++ b/arch/parisc/kernel/module.c
6953@@ -98,16 +98,38 @@
6954
6955 /* three functions to determine where in the module core
6956 * or init pieces the location is */
6957+static inline int in_init_rx(struct module *me, void *loc)
6958+{
6959+ return (loc >= me->module_init_rx &&
6960+ loc < (me->module_init_rx + me->init_size_rx));
6961+}
6962+
6963+static inline int in_init_rw(struct module *me, void *loc)
6964+{
6965+ return (loc >= me->module_init_rw &&
6966+ loc < (me->module_init_rw + me->init_size_rw));
6967+}
6968+
6969 static inline int in_init(struct module *me, void *loc)
6970 {
6971- return (loc >= me->module_init &&
6972- loc <= (me->module_init + me->init_size));
6973+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6974+}
6975+
6976+static inline int in_core_rx(struct module *me, void *loc)
6977+{
6978+ return (loc >= me->module_core_rx &&
6979+ loc < (me->module_core_rx + me->core_size_rx));
6980+}
6981+
6982+static inline int in_core_rw(struct module *me, void *loc)
6983+{
6984+ return (loc >= me->module_core_rw &&
6985+ loc < (me->module_core_rw + me->core_size_rw));
6986 }
6987
6988 static inline int in_core(struct module *me, void *loc)
6989 {
6990- return (loc >= me->module_core &&
6991- loc <= (me->module_core + me->core_size));
6992+ return in_core_rx(me, loc) || in_core_rw(me, loc);
6993 }
6994
6995 static inline int in_local(struct module *me, void *loc)
6996@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
6997 }
6998
6999 /* align things a bit */
7000- me->core_size = ALIGN(me->core_size, 16);
7001- me->arch.got_offset = me->core_size;
7002- me->core_size += gots * sizeof(struct got_entry);
7003+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7004+ me->arch.got_offset = me->core_size_rw;
7005+ me->core_size_rw += gots * sizeof(struct got_entry);
7006
7007- me->core_size = ALIGN(me->core_size, 16);
7008- me->arch.fdesc_offset = me->core_size;
7009- me->core_size += fdescs * sizeof(Elf_Fdesc);
7010+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7011+ me->arch.fdesc_offset = me->core_size_rw;
7012+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7013
7014 me->arch.got_max = gots;
7015 me->arch.fdesc_max = fdescs;
7016@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7017
7018 BUG_ON(value == 0);
7019
7020- got = me->module_core + me->arch.got_offset;
7021+ got = me->module_core_rw + me->arch.got_offset;
7022 for (i = 0; got[i].addr; i++)
7023 if (got[i].addr == value)
7024 goto out;
7025@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7026 #ifdef CONFIG_64BIT
7027 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7028 {
7029- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7030+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7031
7032 if (!value) {
7033 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7034@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7035
7036 /* Create new one */
7037 fdesc->addr = value;
7038- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7039+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7040 return (Elf_Addr)fdesc;
7041 }
7042 #endif /* CONFIG_64BIT */
7043@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7044
7045 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7046 end = table + sechdrs[me->arch.unwind_section].sh_size;
7047- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7048+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7049
7050 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7051 me->arch.unwind_section, table, end, gp);
7052diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7053index 5dfd248..64914ac 100644
7054--- a/arch/parisc/kernel/sys_parisc.c
7055+++ b/arch/parisc/kernel/sys_parisc.c
7056@@ -33,9 +33,11 @@
7057 #include <linux/utsname.h>
7058 #include <linux/personality.h>
7059
7060-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7061+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
7062+ unsigned long flags)
7063 {
7064 struct vm_unmapped_area_info info;
7065+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7066
7067 info.flags = 0;
7068 info.length = len;
7069@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7070 info.high_limit = TASK_SIZE;
7071 info.align_mask = 0;
7072 info.align_offset = 0;
7073+ info.threadstack_offset = offset;
7074 return vm_unmapped_area(&info);
7075 }
7076
7077@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
7078 return (unsigned long) mapping >> 8;
7079 }
7080
7081-static unsigned long get_shared_area(struct address_space *mapping,
7082- unsigned long addr, unsigned long len, unsigned long pgoff)
7083+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
7084+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
7085 {
7086 struct vm_unmapped_area_info info;
7087+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7088
7089 info.flags = 0;
7090 info.length = len;
7091@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
7092 info.high_limit = TASK_SIZE;
7093 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7094 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
7095+ info.threadstack_offset = offset;
7096 return vm_unmapped_area(&info);
7097 }
7098
7099@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7100 return -EINVAL;
7101 return addr;
7102 }
7103- if (!addr)
7104+ if (!addr) {
7105 addr = TASK_UNMAPPED_BASE;
7106
7107+#ifdef CONFIG_PAX_RANDMMAP
7108+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7109+ addr += current->mm->delta_mmap;
7110+#endif
7111+
7112+ }
7113+
7114 if (filp) {
7115- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
7116+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
7117 } else if(flags & MAP_SHARED) {
7118- addr = get_shared_area(NULL, addr, len, pgoff);
7119+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
7120 } else {
7121- addr = get_unshared_area(addr, len);
7122+ addr = get_unshared_area(filp, addr, len, flags);
7123 }
7124 return addr;
7125 }
7126diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7127index 04e47c6..7a8faf6 100644
7128--- a/arch/parisc/kernel/traps.c
7129+++ b/arch/parisc/kernel/traps.c
7130@@ -727,9 +727,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7131
7132 down_read(&current->mm->mmap_sem);
7133 vma = find_vma(current->mm,regs->iaoq[0]);
7134- if (vma && (regs->iaoq[0] >= vma->vm_start)
7135- && (vma->vm_flags & VM_EXEC)) {
7136-
7137+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7138 fault_address = regs->iaoq[0];
7139 fault_space = regs->iasq[0];
7140
7141diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7142index f247a34..dc0f219 100644
7143--- a/arch/parisc/mm/fault.c
7144+++ b/arch/parisc/mm/fault.c
7145@@ -15,6 +15,7 @@
7146 #include <linux/sched.h>
7147 #include <linux/interrupt.h>
7148 #include <linux/module.h>
7149+#include <linux/unistd.h>
7150
7151 #include <asm/uaccess.h>
7152 #include <asm/traps.h>
7153@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7154 static unsigned long
7155 parisc_acctyp(unsigned long code, unsigned int inst)
7156 {
7157- if (code == 6 || code == 16)
7158+ if (code == 6 || code == 7 || code == 16)
7159 return VM_EXEC;
7160
7161 switch (inst & 0xf0000000) {
7162@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7163 }
7164 #endif
7165
7166+#ifdef CONFIG_PAX_PAGEEXEC
7167+/*
7168+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7169+ *
7170+ * returns 1 when task should be killed
7171+ * 2 when rt_sigreturn trampoline was detected
7172+ * 3 when unpatched PLT trampoline was detected
7173+ */
7174+static int pax_handle_fetch_fault(struct pt_regs *regs)
7175+{
7176+
7177+#ifdef CONFIG_PAX_EMUPLT
7178+ int err;
7179+
7180+ do { /* PaX: unpatched PLT emulation */
7181+ unsigned int bl, depwi;
7182+
7183+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7184+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7185+
7186+ if (err)
7187+ break;
7188+
7189+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7190+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7191+
7192+ err = get_user(ldw, (unsigned int *)addr);
7193+ err |= get_user(bv, (unsigned int *)(addr+4));
7194+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7195+
7196+ if (err)
7197+ break;
7198+
7199+ if (ldw == 0x0E801096U &&
7200+ bv == 0xEAC0C000U &&
7201+ ldw2 == 0x0E881095U)
7202+ {
7203+ unsigned int resolver, map;
7204+
7205+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7206+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7207+ if (err)
7208+ break;
7209+
7210+ regs->gr[20] = instruction_pointer(regs)+8;
7211+ regs->gr[21] = map;
7212+ regs->gr[22] = resolver;
7213+ regs->iaoq[0] = resolver | 3UL;
7214+ regs->iaoq[1] = regs->iaoq[0] + 4;
7215+ return 3;
7216+ }
7217+ }
7218+ } while (0);
7219+#endif
7220+
7221+#ifdef CONFIG_PAX_EMUTRAMP
7222+
7223+#ifndef CONFIG_PAX_EMUSIGRT
7224+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7225+ return 1;
7226+#endif
7227+
7228+ do { /* PaX: rt_sigreturn emulation */
7229+ unsigned int ldi1, ldi2, bel, nop;
7230+
7231+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7232+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7233+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7234+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7235+
7236+ if (err)
7237+ break;
7238+
7239+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7240+ ldi2 == 0x3414015AU &&
7241+ bel == 0xE4008200U &&
7242+ nop == 0x08000240U)
7243+ {
7244+ regs->gr[25] = (ldi1 & 2) >> 1;
7245+ regs->gr[20] = __NR_rt_sigreturn;
7246+ regs->gr[31] = regs->iaoq[1] + 16;
7247+ regs->sr[0] = regs->iasq[1];
7248+ regs->iaoq[0] = 0x100UL;
7249+ regs->iaoq[1] = regs->iaoq[0] + 4;
7250+ regs->iasq[0] = regs->sr[2];
7251+ regs->iasq[1] = regs->sr[2];
7252+ return 2;
7253+ }
7254+ } while (0);
7255+#endif
7256+
7257+ return 1;
7258+}
7259+
7260+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7261+{
7262+ unsigned long i;
7263+
7264+ printk(KERN_ERR "PAX: bytes at PC: ");
7265+ for (i = 0; i < 5; i++) {
7266+ unsigned int c;
7267+ if (get_user(c, (unsigned int *)pc+i))
7268+ printk(KERN_CONT "???????? ");
7269+ else
7270+ printk(KERN_CONT "%08x ", c);
7271+ }
7272+ printk("\n");
7273+}
7274+#endif
7275+
7276 int fixup_exception(struct pt_regs *regs)
7277 {
7278 const struct exception_table_entry *fix;
7279@@ -194,8 +305,33 @@ good_area:
7280
7281 acc_type = parisc_acctyp(code,regs->iir);
7282
7283- if ((vma->vm_flags & acc_type) != acc_type)
7284+ if ((vma->vm_flags & acc_type) != acc_type) {
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7288+ (address & ~3UL) == instruction_pointer(regs))
7289+ {
7290+ up_read(&mm->mmap_sem);
7291+ switch (pax_handle_fetch_fault(regs)) {
7292+
7293+#ifdef CONFIG_PAX_EMUPLT
7294+ case 3:
7295+ return;
7296+#endif
7297+
7298+#ifdef CONFIG_PAX_EMUTRAMP
7299+ case 2:
7300+ return;
7301+#endif
7302+
7303+ }
7304+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7305+ do_group_exit(SIGKILL);
7306+ }
7307+#endif
7308+
7309 goto bad_area;
7310+ }
7311
7312 /*
7313 * If for any reason at all we couldn't handle the fault, make
7314diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7315index e3b1d41..8e81edf 100644
7316--- a/arch/powerpc/include/asm/atomic.h
7317+++ b/arch/powerpc/include/asm/atomic.h
7318@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7319 return t1;
7320 }
7321
7322+#define atomic64_read_unchecked(v) atomic64_read(v)
7323+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7324+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7325+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7326+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7327+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7328+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7329+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7330+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7331+
7332 #endif /* __powerpc64__ */
7333
7334 #endif /* __KERNEL__ */
7335diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7336index 9e495c9..b6878e5 100644
7337--- a/arch/powerpc/include/asm/cache.h
7338+++ b/arch/powerpc/include/asm/cache.h
7339@@ -3,6 +3,7 @@
7340
7341 #ifdef __KERNEL__
7342
7343+#include <linux/const.h>
7344
7345 /* bytes per L1 cache line */
7346 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7347@@ -22,7 +23,7 @@
7348 #define L1_CACHE_SHIFT 7
7349 #endif
7350
7351-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7352+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7353
7354 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7355
7356diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7357index cc0655a..13eac2e 100644
7358--- a/arch/powerpc/include/asm/elf.h
7359+++ b/arch/powerpc/include/asm/elf.h
7360@@ -28,8 +28,19 @@
7361 the loader. We need to make sure that it is out of the way of the program
7362 that it will "exec", and that there is sufficient room for the brk. */
7363
7364-extern unsigned long randomize_et_dyn(unsigned long base);
7365-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7366+#define ELF_ET_DYN_BASE (0x20000000)
7367+
7368+#ifdef CONFIG_PAX_ASLR
7369+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7370+
7371+#ifdef __powerpc64__
7372+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7373+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7374+#else
7375+#define PAX_DELTA_MMAP_LEN 15
7376+#define PAX_DELTA_STACK_LEN 15
7377+#endif
7378+#endif
7379
7380 /*
7381 * Our registers are always unsigned longs, whether we're a 32 bit
7382@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7383 (0x7ff >> (PAGE_SHIFT - 12)) : \
7384 (0x3ffff >> (PAGE_SHIFT - 12)))
7385
7386-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7387-#define arch_randomize_brk arch_randomize_brk
7388-
7389-
7390 #ifdef CONFIG_SPU_BASE
7391 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7392 #define NT_SPU 1
7393diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7394index 8196e9c..d83a9f3 100644
7395--- a/arch/powerpc/include/asm/exec.h
7396+++ b/arch/powerpc/include/asm/exec.h
7397@@ -4,6 +4,6 @@
7398 #ifndef _ASM_POWERPC_EXEC_H
7399 #define _ASM_POWERPC_EXEC_H
7400
7401-extern unsigned long arch_align_stack(unsigned long sp);
7402+#define arch_align_stack(x) ((x) & ~0xfUL)
7403
7404 #endif /* _ASM_POWERPC_EXEC_H */
7405diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
7406index 5acabbd..7ea14fa 100644
7407--- a/arch/powerpc/include/asm/kmap_types.h
7408+++ b/arch/powerpc/include/asm/kmap_types.h
7409@@ -10,7 +10,7 @@
7410 * 2 of the License, or (at your option) any later version.
7411 */
7412
7413-#define KM_TYPE_NR 16
7414+#define KM_TYPE_NR 17
7415
7416 #endif /* __KERNEL__ */
7417 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
7418diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
7419index 8565c25..2865190 100644
7420--- a/arch/powerpc/include/asm/mman.h
7421+++ b/arch/powerpc/include/asm/mman.h
7422@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
7423 }
7424 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
7425
7426-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
7427+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
7428 {
7429 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
7430 }
7431diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
7432index 988c812..63c7d70 100644
7433--- a/arch/powerpc/include/asm/page.h
7434+++ b/arch/powerpc/include/asm/page.h
7435@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
7436 * and needs to be executable. This means the whole heap ends
7437 * up being executable.
7438 */
7439-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7440- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7441+#define VM_DATA_DEFAULT_FLAGS32 \
7442+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7443+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7444
7445 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7446 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7447@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
7448 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
7449 #endif
7450
7451+#define ktla_ktva(addr) (addr)
7452+#define ktva_ktla(addr) (addr)
7453+
7454 #ifndef CONFIG_PPC_BOOK3S_64
7455 /*
7456 * Use the top bit of the higher-level page table entries to indicate whether
7457diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
7458index 88693ce..ac6f9ab 100644
7459--- a/arch/powerpc/include/asm/page_64.h
7460+++ b/arch/powerpc/include/asm/page_64.h
7461@@ -153,15 +153,18 @@ do { \
7462 * stack by default, so in the absence of a PT_GNU_STACK program header
7463 * we turn execute permission off.
7464 */
7465-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7466- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7467+#define VM_STACK_DEFAULT_FLAGS32 \
7468+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7469+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7470
7471 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7472 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7473
7474+#ifndef CONFIG_PAX_PAGEEXEC
7475 #define VM_STACK_DEFAULT_FLAGS \
7476 (is_32bit_task() ? \
7477 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
7478+#endif
7479
7480 #include <asm-generic/getorder.h>
7481
7482diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
7483index b66ae72..4a378cd 100644
7484--- a/arch/powerpc/include/asm/pgalloc-64.h
7485+++ b/arch/powerpc/include/asm/pgalloc-64.h
7486@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7487 #ifndef CONFIG_PPC_64K_PAGES
7488
7489 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
7490+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
7491
7492 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
7493 {
7494@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7495 pud_set(pud, (unsigned long)pmd);
7496 }
7497
7498+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7499+{
7500+ pud_populate(mm, pud, pmd);
7501+}
7502+
7503 #define pmd_populate(mm, pmd, pte_page) \
7504 pmd_populate_kernel(mm, pmd, page_address(pte_page))
7505 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
7506@@ -171,6 +177,7 @@ extern void __tlb_remove_table(void *_table);
7507 #endif
7508
7509 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
7510+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7511
7512 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7513 pte_t *pte)
7514diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
7515index 7aeb955..19f748e 100644
7516--- a/arch/powerpc/include/asm/pgtable.h
7517+++ b/arch/powerpc/include/asm/pgtable.h
7518@@ -2,6 +2,7 @@
7519 #define _ASM_POWERPC_PGTABLE_H
7520 #ifdef __KERNEL__
7521
7522+#include <linux/const.h>
7523 #ifndef __ASSEMBLY__
7524 #include <asm/processor.h> /* For TASK_SIZE */
7525 #include <asm/mmu.h>
7526diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
7527index 4aad413..85d86bf 100644
7528--- a/arch/powerpc/include/asm/pte-hash32.h
7529+++ b/arch/powerpc/include/asm/pte-hash32.h
7530@@ -21,6 +21,7 @@
7531 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
7532 #define _PAGE_USER 0x004 /* usermode access allowed */
7533 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
7534+#define _PAGE_EXEC _PAGE_GUARDED
7535 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
7536 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
7537 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
7538diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
7539index e1fb161..2290d1d 100644
7540--- a/arch/powerpc/include/asm/reg.h
7541+++ b/arch/powerpc/include/asm/reg.h
7542@@ -234,6 +234,7 @@
7543 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
7544 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
7545 #define DSISR_NOHPTE 0x40000000 /* no translation found */
7546+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
7547 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
7548 #define DSISR_ISSTORE 0x02000000 /* access was a store */
7549 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
7550diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
7551index 48cfc85..891382f 100644
7552--- a/arch/powerpc/include/asm/smp.h
7553+++ b/arch/powerpc/include/asm/smp.h
7554@@ -50,7 +50,7 @@ struct smp_ops_t {
7555 int (*cpu_disable)(void);
7556 void (*cpu_die)(unsigned int nr);
7557 int (*cpu_bootable)(unsigned int nr);
7558-};
7559+} __no_const;
7560
7561 extern void smp_send_debugger_break(void);
7562 extern void start_secondary_resume(void);
7563diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
7564index ba7b197..d292e26 100644
7565--- a/arch/powerpc/include/asm/thread_info.h
7566+++ b/arch/powerpc/include/asm/thread_info.h
7567@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
7568 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
7569 TIF_NEED_RESCHED */
7570 #define TIF_32BIT 4 /* 32 bit binary */
7571-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
7572 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
7573 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
7574 #define TIF_SINGLESTEP 8 /* singlestepping active */
7575@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
7576 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
7577 for stack store? */
7578 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
7579+#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
7580+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
7581+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
7582
7583 /* as above, but as bit values */
7584 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
7585@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
7586 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7587 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
7588 #define _TIF_NOHZ (1<<TIF_NOHZ)
7589+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7590 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
7591 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
7592- _TIF_NOHZ)
7593+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
7594
7595 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
7596 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
7597diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
7598index 4db4959..aba5c41 100644
7599--- a/arch/powerpc/include/asm/uaccess.h
7600+++ b/arch/powerpc/include/asm/uaccess.h
7601@@ -318,52 +318,6 @@ do { \
7602 extern unsigned long __copy_tofrom_user(void __user *to,
7603 const void __user *from, unsigned long size);
7604
7605-#ifndef __powerpc64__
7606-
7607-static inline unsigned long copy_from_user(void *to,
7608- const void __user *from, unsigned long n)
7609-{
7610- unsigned long over;
7611-
7612- if (access_ok(VERIFY_READ, from, n))
7613- return __copy_tofrom_user((__force void __user *)to, from, n);
7614- if ((unsigned long)from < TASK_SIZE) {
7615- over = (unsigned long)from + n - TASK_SIZE;
7616- return __copy_tofrom_user((__force void __user *)to, from,
7617- n - over) + over;
7618- }
7619- return n;
7620-}
7621-
7622-static inline unsigned long copy_to_user(void __user *to,
7623- const void *from, unsigned long n)
7624-{
7625- unsigned long over;
7626-
7627- if (access_ok(VERIFY_WRITE, to, n))
7628- return __copy_tofrom_user(to, (__force void __user *)from, n);
7629- if ((unsigned long)to < TASK_SIZE) {
7630- over = (unsigned long)to + n - TASK_SIZE;
7631- return __copy_tofrom_user(to, (__force void __user *)from,
7632- n - over) + over;
7633- }
7634- return n;
7635-}
7636-
7637-#else /* __powerpc64__ */
7638-
7639-#define __copy_in_user(to, from, size) \
7640- __copy_tofrom_user((to), (from), (size))
7641-
7642-extern unsigned long copy_from_user(void *to, const void __user *from,
7643- unsigned long n);
7644-extern unsigned long copy_to_user(void __user *to, const void *from,
7645- unsigned long n);
7646-extern unsigned long copy_in_user(void __user *to, const void __user *from,
7647- unsigned long n);
7648-
7649-#endif /* __powerpc64__ */
7650-
7651 static inline unsigned long __copy_from_user_inatomic(void *to,
7652 const void __user *from, unsigned long n)
7653 {
7654@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
7655 if (ret == 0)
7656 return 0;
7657 }
7658+
7659+ if (!__builtin_constant_p(n))
7660+ check_object_size(to, n, false);
7661+
7662 return __copy_tofrom_user((__force void __user *)to, from, n);
7663 }
7664
7665@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
7666 if (ret == 0)
7667 return 0;
7668 }
7669+
7670+ if (!__builtin_constant_p(n))
7671+ check_object_size(from, n, true);
7672+
7673 return __copy_tofrom_user(to, (__force const void __user *)from, n);
7674 }
7675
7676@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
7677 return __copy_to_user_inatomic(to, from, size);
7678 }
7679
7680+#ifndef __powerpc64__
7681+
7682+static inline unsigned long __must_check copy_from_user(void *to,
7683+ const void __user *from, unsigned long n)
7684+{
7685+ unsigned long over;
7686+
7687+ if ((long)n < 0)
7688+ return n;
7689+
7690+ if (access_ok(VERIFY_READ, from, n)) {
7691+ if (!__builtin_constant_p(n))
7692+ check_object_size(to, n, false);
7693+ return __copy_tofrom_user((__force void __user *)to, from, n);
7694+ }
7695+ if ((unsigned long)from < TASK_SIZE) {
7696+ over = (unsigned long)from + n - TASK_SIZE;
7697+ if (!__builtin_constant_p(n - over))
7698+ check_object_size(to, n - over, false);
7699+ return __copy_tofrom_user((__force void __user *)to, from,
7700+ n - over) + over;
7701+ }
7702+ return n;
7703+}
7704+
7705+static inline unsigned long __must_check copy_to_user(void __user *to,
7706+ const void *from, unsigned long n)
7707+{
7708+ unsigned long over;
7709+
7710+ if ((long)n < 0)
7711+ return n;
7712+
7713+ if (access_ok(VERIFY_WRITE, to, n)) {
7714+ if (!__builtin_constant_p(n))
7715+ check_object_size(from, n, true);
7716+ return __copy_tofrom_user(to, (__force void __user *)from, n);
7717+ }
7718+ if ((unsigned long)to < TASK_SIZE) {
7719+ over = (unsigned long)to + n - TASK_SIZE;
7720+ if (!__builtin_constant_p(n))
7721+ check_object_size(from, n - over, true);
7722+ return __copy_tofrom_user(to, (__force void __user *)from,
7723+ n - over) + over;
7724+ }
7725+ return n;
7726+}
7727+
7728+#else /* __powerpc64__ */
7729+
7730+#define __copy_in_user(to, from, size) \
7731+ __copy_tofrom_user((to), (from), (size))
7732+
7733+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
7734+{
7735+ if ((long)n < 0 || n > INT_MAX)
7736+ return n;
7737+
7738+ if (!__builtin_constant_p(n))
7739+ check_object_size(to, n, false);
7740+
7741+ if (likely(access_ok(VERIFY_READ, from, n)))
7742+ n = __copy_from_user(to, from, n);
7743+ else
7744+ memset(to, 0, n);
7745+ return n;
7746+}
7747+
7748+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
7749+{
7750+ if ((long)n < 0 || n > INT_MAX)
7751+ return n;
7752+
7753+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
7754+ if (!__builtin_constant_p(n))
7755+ check_object_size(from, n, true);
7756+ n = __copy_to_user(to, from, n);
7757+ }
7758+ return n;
7759+}
7760+
7761+extern unsigned long copy_in_user(void __user *to, const void __user *from,
7762+ unsigned long n);
7763+
7764+#endif /* __powerpc64__ */
7765+
7766 extern unsigned long __clear_user(void __user *addr, unsigned long size);
7767
7768 static inline unsigned long clear_user(void __user *addr, unsigned long size)
7769diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
7770index 645170a..6cf0271 100644
7771--- a/arch/powerpc/kernel/exceptions-64e.S
7772+++ b/arch/powerpc/kernel/exceptions-64e.S
7773@@ -757,6 +757,7 @@ storage_fault_common:
7774 std r14,_DAR(r1)
7775 std r15,_DSISR(r1)
7776 addi r3,r1,STACK_FRAME_OVERHEAD
7777+ bl .save_nvgprs
7778 mr r4,r14
7779 mr r5,r15
7780 ld r14,PACA_EXGEN+EX_R14(r13)
7781@@ -765,8 +766,7 @@ storage_fault_common:
7782 cmpdi r3,0
7783 bne- 1f
7784 b .ret_from_except_lite
7785-1: bl .save_nvgprs
7786- mr r5,r3
7787+1: mr r5,r3
7788 addi r3,r1,STACK_FRAME_OVERHEAD
7789 ld r4,_DAR(r1)
7790 bl .bad_page_fault
7791diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
7792index 902ca3c..e942155 100644
7793--- a/arch/powerpc/kernel/exceptions-64s.S
7794+++ b/arch/powerpc/kernel/exceptions-64s.S
7795@@ -1357,10 +1357,10 @@ handle_page_fault:
7796 11: ld r4,_DAR(r1)
7797 ld r5,_DSISR(r1)
7798 addi r3,r1,STACK_FRAME_OVERHEAD
7799+ bl .save_nvgprs
7800 bl .do_page_fault
7801 cmpdi r3,0
7802 beq+ 12f
7803- bl .save_nvgprs
7804 mr r5,r3
7805 addi r3,r1,STACK_FRAME_OVERHEAD
7806 lwz r4,_DAR(r1)
7807diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
7808index 2e3200c..72095ce 100644
7809--- a/arch/powerpc/kernel/module_32.c
7810+++ b/arch/powerpc/kernel/module_32.c
7811@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
7812 me->arch.core_plt_section = i;
7813 }
7814 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
7815- printk("Module doesn't contain .plt or .init.plt sections.\n");
7816+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
7817 return -ENOEXEC;
7818 }
7819
7820@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
7821
7822 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
7823 /* Init, or core PLT? */
7824- if (location >= mod->module_core
7825- && location < mod->module_core + mod->core_size)
7826+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
7827+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
7828 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
7829- else
7830+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
7831+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
7832 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
7833+ else {
7834+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
7835+ return ~0UL;
7836+ }
7837
7838 /* Find this entry, or if that fails, the next avail. entry */
7839 while (entry->jump[0]) {
7840diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
7841index 7baa27b..f6b394a 100644
7842--- a/arch/powerpc/kernel/process.c
7843+++ b/arch/powerpc/kernel/process.c
7844@@ -884,8 +884,8 @@ void show_regs(struct pt_regs * regs)
7845 * Lookup NIP late so we have the best change of getting the
7846 * above info out without failing
7847 */
7848- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
7849- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
7850+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
7851+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
7852 #endif
7853 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
7854 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
7855@@ -1345,10 +1345,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7856 newsp = stack[0];
7857 ip = stack[STACK_FRAME_LR_SAVE];
7858 if (!firstframe || ip != lr) {
7859- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
7860+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
7861 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7862 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
7863- printk(" (%pS)",
7864+ printk(" (%pA)",
7865 (void *)current->ret_stack[curr_frame].ret);
7866 curr_frame--;
7867 }
7868@@ -1368,7 +1368,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7869 struct pt_regs *regs = (struct pt_regs *)
7870 (sp + STACK_FRAME_OVERHEAD);
7871 lr = regs->link;
7872- printk("--- Exception: %lx at %pS\n LR = %pS\n",
7873+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
7874 regs->trap, (void *)regs->nip, (void *)lr);
7875 firstframe = 1;
7876 }
7877@@ -1404,58 +1404,3 @@ void notrace __ppc64_runlatch_off(void)
7878 mtspr(SPRN_CTRLT, ctrl);
7879 }
7880 #endif /* CONFIG_PPC64 */
7881-
7882-unsigned long arch_align_stack(unsigned long sp)
7883-{
7884- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7885- sp -= get_random_int() & ~PAGE_MASK;
7886- return sp & ~0xf;
7887-}
7888-
7889-static inline unsigned long brk_rnd(void)
7890-{
7891- unsigned long rnd = 0;
7892-
7893- /* 8MB for 32bit, 1GB for 64bit */
7894- if (is_32bit_task())
7895- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
7896- else
7897- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7898-
7899- return rnd << PAGE_SHIFT;
7900-}
7901-
7902-unsigned long arch_randomize_brk(struct mm_struct *mm)
7903-{
7904- unsigned long base = mm->brk;
7905- unsigned long ret;
7906-
7907-#ifdef CONFIG_PPC_STD_MMU_64
7908- /*
7909- * If we are using 1TB segments and we are allowed to randomise
7910- * the heap, we can put it above 1TB so it is backed by a 1TB
7911- * segment. Otherwise the heap will be in the bottom 1TB
7912- * which always uses 256MB segments and this may result in a
7913- * performance penalty.
7914- */
7915- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7916- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7917-#endif
7918-
7919- ret = PAGE_ALIGN(base + brk_rnd());
7920-
7921- if (ret < mm->brk)
7922- return mm->brk;
7923-
7924- return ret;
7925-}
7926-
7927-unsigned long randomize_et_dyn(unsigned long base)
7928-{
7929- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7930-
7931- if (ret < base)
7932- return base;
7933-
7934- return ret;
7935-}
7936diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7937index 64f7bd5..8dd550f 100644
7938--- a/arch/powerpc/kernel/ptrace.c
7939+++ b/arch/powerpc/kernel/ptrace.c
7940@@ -1783,6 +1783,10 @@ long arch_ptrace(struct task_struct *child, long request,
7941 return ret;
7942 }
7943
7944+#ifdef CONFIG_GRKERNSEC_SETXID
7945+extern void gr_delayed_cred_worker(void);
7946+#endif
7947+
7948 /*
7949 * We must return the syscall number to actually look up in the table.
7950 * This can be -1L to skip running any syscall at all.
7951@@ -1795,6 +1799,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7952
7953 secure_computing_strict(regs->gpr[0]);
7954
7955+#ifdef CONFIG_GRKERNSEC_SETXID
7956+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7957+ gr_delayed_cred_worker();
7958+#endif
7959+
7960 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7961 tracehook_report_syscall_entry(regs))
7962 /*
7963@@ -1829,6 +1838,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7964 {
7965 int step;
7966
7967+#ifdef CONFIG_GRKERNSEC_SETXID
7968+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7969+ gr_delayed_cred_worker();
7970+#endif
7971+
7972 audit_syscall_exit(regs);
7973
7974 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7975diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7976index 0f83122..c0aca6a 100644
7977--- a/arch/powerpc/kernel/signal_32.c
7978+++ b/arch/powerpc/kernel/signal_32.c
7979@@ -987,7 +987,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7980 /* Save user registers on the stack */
7981 frame = &rt_sf->uc.uc_mcontext;
7982 addr = frame;
7983- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7984+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7985 sigret = 0;
7986 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7987 } else {
7988diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
7989index 887e99d..310bc11 100644
7990--- a/arch/powerpc/kernel/signal_64.c
7991+++ b/arch/powerpc/kernel/signal_64.c
7992@@ -751,7 +751,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
7993 #endif
7994
7995 /* Set up to return from userspace. */
7996- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
7997+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7998 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
7999 } else {
8000 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8001diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
8002index e68a845..8b140e6 100644
8003--- a/arch/powerpc/kernel/sysfs.c
8004+++ b/arch/powerpc/kernel/sysfs.c
8005@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8006 return NOTIFY_OK;
8007 }
8008
8009-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8010+static struct notifier_block sysfs_cpu_nb = {
8011 .notifier_call = sysfs_cpu_notify,
8012 };
8013
8014diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8015index 88929b1..bece8f8 100644
8016--- a/arch/powerpc/kernel/traps.c
8017+++ b/arch/powerpc/kernel/traps.c
8018@@ -141,6 +141,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8019 return flags;
8020 }
8021
8022+extern void gr_handle_kernel_exploit(void);
8023+
8024 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8025 int signr)
8026 {
8027@@ -190,6 +192,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8028 panic("Fatal exception in interrupt");
8029 if (panic_on_oops)
8030 panic("Fatal exception");
8031+
8032+ gr_handle_kernel_exploit();
8033+
8034 do_exit(signr);
8035 }
8036
8037diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8038index d4f463a..8fb7431 100644
8039--- a/arch/powerpc/kernel/vdso.c
8040+++ b/arch/powerpc/kernel/vdso.c
8041@@ -34,6 +34,7 @@
8042 #include <asm/firmware.h>
8043 #include <asm/vdso.h>
8044 #include <asm/vdso_datapage.h>
8045+#include <asm/mman.h>
8046
8047 #include "setup.h"
8048
8049@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8050 vdso_base = VDSO32_MBASE;
8051 #endif
8052
8053- current->mm->context.vdso_base = 0;
8054+ current->mm->context.vdso_base = ~0UL;
8055
8056 /* vDSO has a problem and was disabled, just don't "enable" it for the
8057 * process
8058@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8059 vdso_base = get_unmapped_area(NULL, vdso_base,
8060 (vdso_pages << PAGE_SHIFT) +
8061 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8062- 0, 0);
8063+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
8064 if (IS_ERR_VALUE(vdso_base)) {
8065 rc = vdso_base;
8066 goto fail_mmapsem;
8067diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8068index 5eea6f3..5d10396 100644
8069--- a/arch/powerpc/lib/usercopy_64.c
8070+++ b/arch/powerpc/lib/usercopy_64.c
8071@@ -9,22 +9,6 @@
8072 #include <linux/module.h>
8073 #include <asm/uaccess.h>
8074
8075-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8076-{
8077- if (likely(access_ok(VERIFY_READ, from, n)))
8078- n = __copy_from_user(to, from, n);
8079- else
8080- memset(to, 0, n);
8081- return n;
8082-}
8083-
8084-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8085-{
8086- if (likely(access_ok(VERIFY_WRITE, to, n)))
8087- n = __copy_to_user(to, from, n);
8088- return n;
8089-}
8090-
8091 unsigned long copy_in_user(void __user *to, const void __user *from,
8092 unsigned long n)
8093 {
8094@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8095 return n;
8096 }
8097
8098-EXPORT_SYMBOL(copy_from_user);
8099-EXPORT_SYMBOL(copy_to_user);
8100 EXPORT_SYMBOL(copy_in_user);
8101
8102diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8103index 8726779..a33c512 100644
8104--- a/arch/powerpc/mm/fault.c
8105+++ b/arch/powerpc/mm/fault.c
8106@@ -33,6 +33,10 @@
8107 #include <linux/magic.h>
8108 #include <linux/ratelimit.h>
8109 #include <linux/context_tracking.h>
8110+#include <linux/slab.h>
8111+#include <linux/pagemap.h>
8112+#include <linux/compiler.h>
8113+#include <linux/unistd.h>
8114
8115 #include <asm/firmware.h>
8116 #include <asm/page.h>
8117@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8118 }
8119 #endif
8120
8121+#ifdef CONFIG_PAX_PAGEEXEC
8122+/*
8123+ * PaX: decide what to do with offenders (regs->nip = fault address)
8124+ *
8125+ * returns 1 when task should be killed
8126+ */
8127+static int pax_handle_fetch_fault(struct pt_regs *regs)
8128+{
8129+ return 1;
8130+}
8131+
8132+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8133+{
8134+ unsigned long i;
8135+
8136+ printk(KERN_ERR "PAX: bytes at PC: ");
8137+ for (i = 0; i < 5; i++) {
8138+ unsigned int c;
8139+ if (get_user(c, (unsigned int __user *)pc+i))
8140+ printk(KERN_CONT "???????? ");
8141+ else
8142+ printk(KERN_CONT "%08x ", c);
8143+ }
8144+ printk("\n");
8145+}
8146+#endif
8147+
8148 /*
8149 * Check whether the instruction at regs->nip is a store using
8150 * an update addressing form which will update r1.
8151@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8152 * indicate errors in DSISR but can validly be set in SRR1.
8153 */
8154 if (trap == 0x400)
8155- error_code &= 0x48200000;
8156+ error_code &= 0x58200000;
8157 else
8158 is_write = error_code & DSISR_ISSTORE;
8159 #else
8160@@ -371,7 +402,7 @@ good_area:
8161 * "undefined". Of those that can be set, this is the only
8162 * one which seems bad.
8163 */
8164- if (error_code & 0x10000000)
8165+ if (error_code & DSISR_GUARDED)
8166 /* Guarded storage error. */
8167 goto bad_area;
8168 #endif /* CONFIG_8xx */
8169@@ -386,7 +417,7 @@ good_area:
8170 * processors use the same I/D cache coherency mechanism
8171 * as embedded.
8172 */
8173- if (error_code & DSISR_PROTFAULT)
8174+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8175 goto bad_area;
8176 #endif /* CONFIG_PPC_STD_MMU */
8177
8178@@ -471,6 +502,23 @@ bad_area:
8179 bad_area_nosemaphore:
8180 /* User mode accesses cause a SIGSEGV */
8181 if (user_mode(regs)) {
8182+
8183+#ifdef CONFIG_PAX_PAGEEXEC
8184+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8185+#ifdef CONFIG_PPC_STD_MMU
8186+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8187+#else
8188+ if (is_exec && regs->nip == address) {
8189+#endif
8190+ switch (pax_handle_fetch_fault(regs)) {
8191+ }
8192+
8193+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8194+ do_group_exit(SIGKILL);
8195+ }
8196+ }
8197+#endif
8198+
8199 _exception(SIGSEGV, regs, code, address);
8200 goto bail;
8201 }
8202diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
8203index 67a42ed..cd463e0 100644
8204--- a/arch/powerpc/mm/mmap_64.c
8205+++ b/arch/powerpc/mm/mmap_64.c
8206@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
8207 {
8208 unsigned long rnd = 0;
8209
8210+#ifdef CONFIG_PAX_RANDMMAP
8211+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8212+#endif
8213+
8214 if (current->flags & PF_RANDOMIZE) {
8215 /* 8MB for 32bit, 1GB for 64bit */
8216 if (is_32bit_task())
8217@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8218 */
8219 if (mmap_is_legacy()) {
8220 mm->mmap_base = TASK_UNMAPPED_BASE;
8221+
8222+#ifdef CONFIG_PAX_RANDMMAP
8223+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8224+ mm->mmap_base += mm->delta_mmap;
8225+#endif
8226+
8227 mm->get_unmapped_area = arch_get_unmapped_area;
8228 mm->unmap_area = arch_unmap_area;
8229 } else {
8230 mm->mmap_base = mmap_base();
8231+
8232+#ifdef CONFIG_PAX_RANDMMAP
8233+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8234+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8235+#endif
8236+
8237 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8238 mm->unmap_area = arch_unmap_area_topdown;
8239 }
8240diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
8241index e779642..e5bb889 100644
8242--- a/arch/powerpc/mm/mmu_context_nohash.c
8243+++ b/arch/powerpc/mm/mmu_context_nohash.c
8244@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
8245 return NOTIFY_OK;
8246 }
8247
8248-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
8249+static struct notifier_block mmu_context_cpu_nb = {
8250 .notifier_call = mmu_context_cpu_notify,
8251 };
8252
8253diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
8254index cafad40..9cbc0fc 100644
8255--- a/arch/powerpc/mm/numa.c
8256+++ b/arch/powerpc/mm/numa.c
8257@@ -920,7 +920,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
8258 return ret;
8259 }
8260
8261-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
8262+static struct notifier_block ppc64_numa_nb = {
8263 .notifier_call = cpu_numa_callback,
8264 .priority = 1 /* Must run before sched domains notifier. */
8265 };
8266diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8267index 3e99c14..f00953c 100644
8268--- a/arch/powerpc/mm/slice.c
8269+++ b/arch/powerpc/mm/slice.c
8270@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8271 if ((mm->task_size - len) < addr)
8272 return 0;
8273 vma = find_vma(mm, addr);
8274- return (!vma || (addr + len) <= vma->vm_start);
8275+ return check_heap_stack_gap(vma, addr, len, 0);
8276 }
8277
8278 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8279@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8280 info.align_offset = 0;
8281
8282 addr = TASK_UNMAPPED_BASE;
8283+
8284+#ifdef CONFIG_PAX_RANDMMAP
8285+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8286+ addr += mm->delta_mmap;
8287+#endif
8288+
8289 while (addr < TASK_SIZE) {
8290 info.low_limit = addr;
8291 if (!slice_scan_available(addr, available, 1, &addr))
8292@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8293 if (fixed && addr > (mm->task_size - len))
8294 return -EINVAL;
8295
8296+#ifdef CONFIG_PAX_RANDMMAP
8297+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8298+ addr = 0;
8299+#endif
8300+
8301 /* If hint, make sure it matches our alignment restrictions */
8302 if (!fixed && addr) {
8303 addr = _ALIGN_UP(addr, 1ul << pshift);
8304diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
8305index 9098692..3d54cd1 100644
8306--- a/arch/powerpc/platforms/cell/spufs/file.c
8307+++ b/arch/powerpc/platforms/cell/spufs/file.c
8308@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8309 return VM_FAULT_NOPAGE;
8310 }
8311
8312-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
8313+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
8314 unsigned long address,
8315- void *buf, int len, int write)
8316+ void *buf, size_t len, int write)
8317 {
8318 struct spu_context *ctx = vma->vm_file->private_data;
8319 unsigned long offset = address - vma->vm_start;
8320diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
8321index bdb738a..49c9f95 100644
8322--- a/arch/powerpc/platforms/powermac/smp.c
8323+++ b/arch/powerpc/platforms/powermac/smp.c
8324@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
8325 return NOTIFY_OK;
8326 }
8327
8328-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
8329+static struct notifier_block smp_core99_cpu_nb = {
8330 .notifier_call = smp_core99_cpu_notify,
8331 };
8332 #endif /* CONFIG_HOTPLUG_CPU */
8333diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
8334index c797832..ce575c8 100644
8335--- a/arch/s390/include/asm/atomic.h
8336+++ b/arch/s390/include/asm/atomic.h
8337@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
8338 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
8339 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8340
8341+#define atomic64_read_unchecked(v) atomic64_read(v)
8342+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8343+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8344+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8345+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8346+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8347+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8348+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8349+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8350+
8351 #define smp_mb__before_atomic_dec() smp_mb()
8352 #define smp_mb__after_atomic_dec() smp_mb()
8353 #define smp_mb__before_atomic_inc() smp_mb()
8354diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
8355index 4d7ccac..d03d0ad 100644
8356--- a/arch/s390/include/asm/cache.h
8357+++ b/arch/s390/include/asm/cache.h
8358@@ -9,8 +9,10 @@
8359 #ifndef __ARCH_S390_CACHE_H
8360 #define __ARCH_S390_CACHE_H
8361
8362-#define L1_CACHE_BYTES 256
8363+#include <linux/const.h>
8364+
8365 #define L1_CACHE_SHIFT 8
8366+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8367 #define NET_SKB_PAD 32
8368
8369 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8370diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
8371index 78f4f87..598ce39 100644
8372--- a/arch/s390/include/asm/elf.h
8373+++ b/arch/s390/include/asm/elf.h
8374@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
8375 the loader. We need to make sure that it is out of the way of the program
8376 that it will "exec", and that there is sufficient room for the brk. */
8377
8378-extern unsigned long randomize_et_dyn(unsigned long base);
8379-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
8380+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
8381+
8382+#ifdef CONFIG_PAX_ASLR
8383+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
8384+
8385+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8386+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8387+#endif
8388
8389 /* This yields a mask that user programs can use to figure out what
8390 instruction set this CPU supports. */
8391@@ -222,9 +228,6 @@ struct linux_binprm;
8392 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
8393 int arch_setup_additional_pages(struct linux_binprm *, int);
8394
8395-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8396-#define arch_randomize_brk arch_randomize_brk
8397-
8398 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
8399
8400 #endif
8401diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
8402index c4a93d6..4d2a9b4 100644
8403--- a/arch/s390/include/asm/exec.h
8404+++ b/arch/s390/include/asm/exec.h
8405@@ -7,6 +7,6 @@
8406 #ifndef __ASM_EXEC_H
8407 #define __ASM_EXEC_H
8408
8409-extern unsigned long arch_align_stack(unsigned long sp);
8410+#define arch_align_stack(x) ((x) & ~0xfUL)
8411
8412 #endif /* __ASM_EXEC_H */
8413diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
8414index 9c33ed4..e40cbef 100644
8415--- a/arch/s390/include/asm/uaccess.h
8416+++ b/arch/s390/include/asm/uaccess.h
8417@@ -252,6 +252,10 @@ static inline unsigned long __must_check
8418 copy_to_user(void __user *to, const void *from, unsigned long n)
8419 {
8420 might_fault();
8421+
8422+ if ((long)n < 0)
8423+ return n;
8424+
8425 return __copy_to_user(to, from, n);
8426 }
8427
8428@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
8429 static inline unsigned long __must_check
8430 __copy_from_user(void *to, const void __user *from, unsigned long n)
8431 {
8432+ if ((long)n < 0)
8433+ return n;
8434+
8435 if (__builtin_constant_p(n) && (n <= 256))
8436 return uaccess.copy_from_user_small(n, from, to);
8437 else
8438@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
8439 static inline unsigned long __must_check
8440 copy_from_user(void *to, const void __user *from, unsigned long n)
8441 {
8442- unsigned int sz = __compiletime_object_size(to);
8443+ size_t sz = __compiletime_object_size(to);
8444
8445 might_fault();
8446- if (unlikely(sz != -1 && sz < n)) {
8447+
8448+ if ((long)n < 0)
8449+ return n;
8450+
8451+ if (unlikely(sz != (size_t)-1 && sz < n)) {
8452 copy_from_user_overflow();
8453 return n;
8454 }
8455diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
8456index 7845e15..59c4353 100644
8457--- a/arch/s390/kernel/module.c
8458+++ b/arch/s390/kernel/module.c
8459@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
8460
8461 /* Increase core size by size of got & plt and set start
8462 offsets for got and plt. */
8463- me->core_size = ALIGN(me->core_size, 4);
8464- me->arch.got_offset = me->core_size;
8465- me->core_size += me->arch.got_size;
8466- me->arch.plt_offset = me->core_size;
8467- me->core_size += me->arch.plt_size;
8468+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
8469+ me->arch.got_offset = me->core_size_rw;
8470+ me->core_size_rw += me->arch.got_size;
8471+ me->arch.plt_offset = me->core_size_rx;
8472+ me->core_size_rx += me->arch.plt_size;
8473 return 0;
8474 }
8475
8476@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8477 if (info->got_initialized == 0) {
8478 Elf_Addr *gotent;
8479
8480- gotent = me->module_core + me->arch.got_offset +
8481+ gotent = me->module_core_rw + me->arch.got_offset +
8482 info->got_offset;
8483 *gotent = val;
8484 info->got_initialized = 1;
8485@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8486 rc = apply_rela_bits(loc, val, 0, 64, 0);
8487 else if (r_type == R_390_GOTENT ||
8488 r_type == R_390_GOTPLTENT) {
8489- val += (Elf_Addr) me->module_core - loc;
8490+ val += (Elf_Addr) me->module_core_rw - loc;
8491 rc = apply_rela_bits(loc, val, 1, 32, 1);
8492 }
8493 break;
8494@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8495 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
8496 if (info->plt_initialized == 0) {
8497 unsigned int *ip;
8498- ip = me->module_core + me->arch.plt_offset +
8499+ ip = me->module_core_rx + me->arch.plt_offset +
8500 info->plt_offset;
8501 #ifndef CONFIG_64BIT
8502 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
8503@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8504 val - loc + 0xffffUL < 0x1ffffeUL) ||
8505 (r_type == R_390_PLT32DBL &&
8506 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
8507- val = (Elf_Addr) me->module_core +
8508+ val = (Elf_Addr) me->module_core_rx +
8509 me->arch.plt_offset +
8510 info->plt_offset;
8511 val += rela->r_addend - loc;
8512@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8513 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
8514 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
8515 val = val + rela->r_addend -
8516- ((Elf_Addr) me->module_core + me->arch.got_offset);
8517+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
8518 if (r_type == R_390_GOTOFF16)
8519 rc = apply_rela_bits(loc, val, 0, 16, 0);
8520 else if (r_type == R_390_GOTOFF32)
8521@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8522 break;
8523 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
8524 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
8525- val = (Elf_Addr) me->module_core + me->arch.got_offset +
8526+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
8527 rela->r_addend - loc;
8528 if (r_type == R_390_GOTPC)
8529 rc = apply_rela_bits(loc, val, 1, 32, 0);
8530diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
8531index 2bc3edd..ab9d598 100644
8532--- a/arch/s390/kernel/process.c
8533+++ b/arch/s390/kernel/process.c
8534@@ -236,39 +236,3 @@ unsigned long get_wchan(struct task_struct *p)
8535 }
8536 return 0;
8537 }
8538-
8539-unsigned long arch_align_stack(unsigned long sp)
8540-{
8541- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8542- sp -= get_random_int() & ~PAGE_MASK;
8543- return sp & ~0xf;
8544-}
8545-
8546-static inline unsigned long brk_rnd(void)
8547-{
8548- /* 8MB for 32bit, 1GB for 64bit */
8549- if (is_32bit_task())
8550- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
8551- else
8552- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
8553-}
8554-
8555-unsigned long arch_randomize_brk(struct mm_struct *mm)
8556-{
8557- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
8558-
8559- if (ret < mm->brk)
8560- return mm->brk;
8561- return ret;
8562-}
8563-
8564-unsigned long randomize_et_dyn(unsigned long base)
8565-{
8566- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8567-
8568- if (!(current->flags & PF_RANDOMIZE))
8569- return base;
8570- if (ret < base)
8571- return base;
8572- return ret;
8573-}
8574diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
8575index 06bafec..2bca531 100644
8576--- a/arch/s390/mm/mmap.c
8577+++ b/arch/s390/mm/mmap.c
8578@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8579 */
8580 if (mmap_is_legacy()) {
8581 mm->mmap_base = TASK_UNMAPPED_BASE;
8582+
8583+#ifdef CONFIG_PAX_RANDMMAP
8584+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8585+ mm->mmap_base += mm->delta_mmap;
8586+#endif
8587+
8588 mm->get_unmapped_area = arch_get_unmapped_area;
8589 mm->unmap_area = arch_unmap_area;
8590 } else {
8591 mm->mmap_base = mmap_base();
8592+
8593+#ifdef CONFIG_PAX_RANDMMAP
8594+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8595+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8596+#endif
8597+
8598 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8599 mm->unmap_area = arch_unmap_area_topdown;
8600 }
8601@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8602 */
8603 if (mmap_is_legacy()) {
8604 mm->mmap_base = TASK_UNMAPPED_BASE;
8605+
8606+#ifdef CONFIG_PAX_RANDMMAP
8607+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8608+ mm->mmap_base += mm->delta_mmap;
8609+#endif
8610+
8611 mm->get_unmapped_area = s390_get_unmapped_area;
8612 mm->unmap_area = arch_unmap_area;
8613 } else {
8614 mm->mmap_base = mmap_base();
8615+
8616+#ifdef CONFIG_PAX_RANDMMAP
8617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8619+#endif
8620+
8621 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
8622 mm->unmap_area = arch_unmap_area_topdown;
8623 }
8624diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
8625index ae3d59f..f65f075 100644
8626--- a/arch/score/include/asm/cache.h
8627+++ b/arch/score/include/asm/cache.h
8628@@ -1,7 +1,9 @@
8629 #ifndef _ASM_SCORE_CACHE_H
8630 #define _ASM_SCORE_CACHE_H
8631
8632+#include <linux/const.h>
8633+
8634 #define L1_CACHE_SHIFT 4
8635-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8636+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8637
8638 #endif /* _ASM_SCORE_CACHE_H */
8639diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
8640index f9f3cd5..58ff438 100644
8641--- a/arch/score/include/asm/exec.h
8642+++ b/arch/score/include/asm/exec.h
8643@@ -1,6 +1,6 @@
8644 #ifndef _ASM_SCORE_EXEC_H
8645 #define _ASM_SCORE_EXEC_H
8646
8647-extern unsigned long arch_align_stack(unsigned long sp);
8648+#define arch_align_stack(x) (x)
8649
8650 #endif /* _ASM_SCORE_EXEC_H */
8651diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
8652index f4c6d02..e9355c3 100644
8653--- a/arch/score/kernel/process.c
8654+++ b/arch/score/kernel/process.c
8655@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
8656
8657 return task_pt_regs(task)->cp0_epc;
8658 }
8659-
8660-unsigned long arch_align_stack(unsigned long sp)
8661-{
8662- return sp;
8663-}
8664diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
8665index ef9e555..331bd29 100644
8666--- a/arch/sh/include/asm/cache.h
8667+++ b/arch/sh/include/asm/cache.h
8668@@ -9,10 +9,11 @@
8669 #define __ASM_SH_CACHE_H
8670 #ifdef __KERNEL__
8671
8672+#include <linux/const.h>
8673 #include <linux/init.h>
8674 #include <cpu/cache.h>
8675
8676-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8677+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8678
8679 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8680
8681diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8682index 03f2b55..b0270327 100644
8683--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8684+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8685@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
8686 return NOTIFY_OK;
8687 }
8688
8689-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
8690+static struct notifier_block shx3_cpu_notifier = {
8691 .notifier_call = shx3_cpu_callback,
8692 };
8693
8694diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
8695index 6777177..cb5e44f 100644
8696--- a/arch/sh/mm/mmap.c
8697+++ b/arch/sh/mm/mmap.c
8698@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8699 struct mm_struct *mm = current->mm;
8700 struct vm_area_struct *vma;
8701 int do_colour_align;
8702+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8703 struct vm_unmapped_area_info info;
8704
8705 if (flags & MAP_FIXED) {
8706@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8707 if (filp || (flags & MAP_SHARED))
8708 do_colour_align = 1;
8709
8710+#ifdef CONFIG_PAX_RANDMMAP
8711+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8712+#endif
8713+
8714 if (addr) {
8715 if (do_colour_align)
8716 addr = COLOUR_ALIGN(addr, pgoff);
8717@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8718 addr = PAGE_ALIGN(addr);
8719
8720 vma = find_vma(mm, addr);
8721- if (TASK_SIZE - len >= addr &&
8722- (!vma || addr + len <= vma->vm_start))
8723+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8724 return addr;
8725 }
8726
8727 info.flags = 0;
8728 info.length = len;
8729- info.low_limit = TASK_UNMAPPED_BASE;
8730+ info.low_limit = mm->mmap_base;
8731 info.high_limit = TASK_SIZE;
8732 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8733 info.align_offset = pgoff << PAGE_SHIFT;
8734@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8735 struct mm_struct *mm = current->mm;
8736 unsigned long addr = addr0;
8737 int do_colour_align;
8738+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8739 struct vm_unmapped_area_info info;
8740
8741 if (flags & MAP_FIXED) {
8742@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8743 if (filp || (flags & MAP_SHARED))
8744 do_colour_align = 1;
8745
8746+#ifdef CONFIG_PAX_RANDMMAP
8747+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8748+#endif
8749+
8750 /* requesting a specific address */
8751 if (addr) {
8752 if (do_colour_align)
8753@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8754 addr = PAGE_ALIGN(addr);
8755
8756 vma = find_vma(mm, addr);
8757- if (TASK_SIZE - len >= addr &&
8758- (!vma || addr + len <= vma->vm_start))
8759+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8760 return addr;
8761 }
8762
8763@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8764 VM_BUG_ON(addr != -ENOMEM);
8765 info.flags = 0;
8766 info.low_limit = TASK_UNMAPPED_BASE;
8767+
8768+#ifdef CONFIG_PAX_RANDMMAP
8769+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8770+ info.low_limit += mm->delta_mmap;
8771+#endif
8772+
8773 info.high_limit = TASK_SIZE;
8774 addr = vm_unmapped_area(&info);
8775 }
8776diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8777index be56a24..443328f 100644
8778--- a/arch/sparc/include/asm/atomic_64.h
8779+++ b/arch/sparc/include/asm/atomic_64.h
8780@@ -14,18 +14,40 @@
8781 #define ATOMIC64_INIT(i) { (i) }
8782
8783 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8784+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8785+{
8786+ return v->counter;
8787+}
8788 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8789+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8790+{
8791+ return v->counter;
8792+}
8793
8794 #define atomic_set(v, i) (((v)->counter) = i)
8795+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8796+{
8797+ v->counter = i;
8798+}
8799 #define atomic64_set(v, i) (((v)->counter) = i)
8800+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8801+{
8802+ v->counter = i;
8803+}
8804
8805 extern void atomic_add(int, atomic_t *);
8806+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
8807 extern void atomic64_add(long, atomic64_t *);
8808+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
8809 extern void atomic_sub(int, atomic_t *);
8810+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
8811 extern void atomic64_sub(long, atomic64_t *);
8812+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
8813
8814 extern int atomic_add_ret(int, atomic_t *);
8815+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
8816 extern long atomic64_add_ret(long, atomic64_t *);
8817+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
8818 extern int atomic_sub_ret(int, atomic_t *);
8819 extern long atomic64_sub_ret(long, atomic64_t *);
8820
8821@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8822 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
8823
8824 #define atomic_inc_return(v) atomic_add_ret(1, v)
8825+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8826+{
8827+ return atomic_add_ret_unchecked(1, v);
8828+}
8829 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
8830+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8831+{
8832+ return atomic64_add_ret_unchecked(1, v);
8833+}
8834
8835 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
8836 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
8837
8838 #define atomic_add_return(i, v) atomic_add_ret(i, v)
8839+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8840+{
8841+ return atomic_add_ret_unchecked(i, v);
8842+}
8843 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
8844+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8845+{
8846+ return atomic64_add_ret_unchecked(i, v);
8847+}
8848
8849 /*
8850 * atomic_inc_and_test - increment and test
8851@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8852 * other cases.
8853 */
8854 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8855+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8856+{
8857+ return atomic_inc_return_unchecked(v) == 0;
8858+}
8859 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8860
8861 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
8862@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8863 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
8864
8865 #define atomic_inc(v) atomic_add(1, v)
8866+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8867+{
8868+ atomic_add_unchecked(1, v);
8869+}
8870 #define atomic64_inc(v) atomic64_add(1, v)
8871+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8872+{
8873+ atomic64_add_unchecked(1, v);
8874+}
8875
8876 #define atomic_dec(v) atomic_sub(1, v)
8877+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8878+{
8879+ atomic_sub_unchecked(1, v);
8880+}
8881 #define atomic64_dec(v) atomic64_sub(1, v)
8882+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8883+{
8884+ atomic64_sub_unchecked(1, v);
8885+}
8886
8887 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8888 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8889
8890 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8891+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8892+{
8893+ return cmpxchg(&v->counter, old, new);
8894+}
8895 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8896+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8897+{
8898+ return xchg(&v->counter, new);
8899+}
8900
8901 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8902 {
8903- int c, old;
8904+ int c, old, new;
8905 c = atomic_read(v);
8906 for (;;) {
8907- if (unlikely(c == (u)))
8908+ if (unlikely(c == u))
8909 break;
8910- old = atomic_cmpxchg((v), c, c + (a));
8911+
8912+ asm volatile("addcc %2, %0, %0\n"
8913+
8914+#ifdef CONFIG_PAX_REFCOUNT
8915+ "tvs %%icc, 6\n"
8916+#endif
8917+
8918+ : "=r" (new)
8919+ : "0" (c), "ir" (a)
8920+ : "cc");
8921+
8922+ old = atomic_cmpxchg(v, c, new);
8923 if (likely(old == c))
8924 break;
8925 c = old;
8926@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8927 #define atomic64_cmpxchg(v, o, n) \
8928 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8929 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8930+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8931+{
8932+ return xchg(&v->counter, new);
8933+}
8934
8935 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8936 {
8937- long c, old;
8938+ long c, old, new;
8939 c = atomic64_read(v);
8940 for (;;) {
8941- if (unlikely(c == (u)))
8942+ if (unlikely(c == u))
8943 break;
8944- old = atomic64_cmpxchg((v), c, c + (a));
8945+
8946+ asm volatile("addcc %2, %0, %0\n"
8947+
8948+#ifdef CONFIG_PAX_REFCOUNT
8949+ "tvs %%xcc, 6\n"
8950+#endif
8951+
8952+ : "=r" (new)
8953+ : "0" (c), "ir" (a)
8954+ : "cc");
8955+
8956+ old = atomic64_cmpxchg(v, c, new);
8957 if (likely(old == c))
8958 break;
8959 c = old;
8960 }
8961- return c != (u);
8962+ return c != u;
8963 }
8964
8965 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8966diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8967index 5bb6991..5c2132e 100644
8968--- a/arch/sparc/include/asm/cache.h
8969+++ b/arch/sparc/include/asm/cache.h
8970@@ -7,10 +7,12 @@
8971 #ifndef _SPARC_CACHE_H
8972 #define _SPARC_CACHE_H
8973
8974+#include <linux/const.h>
8975+
8976 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8977
8978 #define L1_CACHE_SHIFT 5
8979-#define L1_CACHE_BYTES 32
8980+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8981
8982 #ifdef CONFIG_SPARC32
8983 #define SMP_CACHE_BYTES_SHIFT 5
8984diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8985index a24e41f..47677ff 100644
8986--- a/arch/sparc/include/asm/elf_32.h
8987+++ b/arch/sparc/include/asm/elf_32.h
8988@@ -114,6 +114,13 @@ typedef struct {
8989
8990 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
8991
8992+#ifdef CONFIG_PAX_ASLR
8993+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8994+
8995+#define PAX_DELTA_MMAP_LEN 16
8996+#define PAX_DELTA_STACK_LEN 16
8997+#endif
8998+
8999 /* This yields a mask that user programs can use to figure out what
9000 instruction set this cpu supports. This can NOT be done in userspace
9001 on Sparc. */
9002diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9003index 370ca1e..d4f4a98 100644
9004--- a/arch/sparc/include/asm/elf_64.h
9005+++ b/arch/sparc/include/asm/elf_64.h
9006@@ -189,6 +189,13 @@ typedef struct {
9007 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9008 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9009
9010+#ifdef CONFIG_PAX_ASLR
9011+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9012+
9013+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9014+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9015+#endif
9016+
9017 extern unsigned long sparc64_elf_hwcap;
9018 #define ELF_HWCAP sparc64_elf_hwcap
9019
9020diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9021index 9b1c36d..209298b 100644
9022--- a/arch/sparc/include/asm/pgalloc_32.h
9023+++ b/arch/sparc/include/asm/pgalloc_32.h
9024@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9025 }
9026
9027 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9028+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9029
9030 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9031 unsigned long address)
9032diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9033index bcfe063..b333142 100644
9034--- a/arch/sparc/include/asm/pgalloc_64.h
9035+++ b/arch/sparc/include/asm/pgalloc_64.h
9036@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9037 }
9038
9039 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9040+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9041
9042 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9043 {
9044diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9045index 6fc1348..390c50a 100644
9046--- a/arch/sparc/include/asm/pgtable_32.h
9047+++ b/arch/sparc/include/asm/pgtable_32.h
9048@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9049 #define PAGE_SHARED SRMMU_PAGE_SHARED
9050 #define PAGE_COPY SRMMU_PAGE_COPY
9051 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9052+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9053+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9054+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9055 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9056
9057 /* Top-level page directory - dummy used by init-mm.
9058@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9059
9060 /* xwr */
9061 #define __P000 PAGE_NONE
9062-#define __P001 PAGE_READONLY
9063-#define __P010 PAGE_COPY
9064-#define __P011 PAGE_COPY
9065+#define __P001 PAGE_READONLY_NOEXEC
9066+#define __P010 PAGE_COPY_NOEXEC
9067+#define __P011 PAGE_COPY_NOEXEC
9068 #define __P100 PAGE_READONLY
9069 #define __P101 PAGE_READONLY
9070 #define __P110 PAGE_COPY
9071 #define __P111 PAGE_COPY
9072
9073 #define __S000 PAGE_NONE
9074-#define __S001 PAGE_READONLY
9075-#define __S010 PAGE_SHARED
9076-#define __S011 PAGE_SHARED
9077+#define __S001 PAGE_READONLY_NOEXEC
9078+#define __S010 PAGE_SHARED_NOEXEC
9079+#define __S011 PAGE_SHARED_NOEXEC
9080 #define __S100 PAGE_READONLY
9081 #define __S101 PAGE_READONLY
9082 #define __S110 PAGE_SHARED
9083diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9084index 79da178..c2eede8 100644
9085--- a/arch/sparc/include/asm/pgtsrmmu.h
9086+++ b/arch/sparc/include/asm/pgtsrmmu.h
9087@@ -115,6 +115,11 @@
9088 SRMMU_EXEC | SRMMU_REF)
9089 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9090 SRMMU_EXEC | SRMMU_REF)
9091+
9092+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9093+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9094+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9095+
9096 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9097 SRMMU_DIRTY | SRMMU_REF)
9098
9099diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9100index 9689176..63c18ea 100644
9101--- a/arch/sparc/include/asm/spinlock_64.h
9102+++ b/arch/sparc/include/asm/spinlock_64.h
9103@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9104
9105 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9106
9107-static void inline arch_read_lock(arch_rwlock_t *lock)
9108+static inline void arch_read_lock(arch_rwlock_t *lock)
9109 {
9110 unsigned long tmp1, tmp2;
9111
9112 __asm__ __volatile__ (
9113 "1: ldsw [%2], %0\n"
9114 " brlz,pn %0, 2f\n"
9115-"4: add %0, 1, %1\n"
9116+"4: addcc %0, 1, %1\n"
9117+
9118+#ifdef CONFIG_PAX_REFCOUNT
9119+" tvs %%icc, 6\n"
9120+#endif
9121+
9122 " cas [%2], %0, %1\n"
9123 " cmp %0, %1\n"
9124 " bne,pn %%icc, 1b\n"
9125@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9126 " .previous"
9127 : "=&r" (tmp1), "=&r" (tmp2)
9128 : "r" (lock)
9129- : "memory");
9130+ : "memory", "cc");
9131 }
9132
9133-static int inline arch_read_trylock(arch_rwlock_t *lock)
9134+static inline int arch_read_trylock(arch_rwlock_t *lock)
9135 {
9136 int tmp1, tmp2;
9137
9138@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9139 "1: ldsw [%2], %0\n"
9140 " brlz,a,pn %0, 2f\n"
9141 " mov 0, %0\n"
9142-" add %0, 1, %1\n"
9143+" addcc %0, 1, %1\n"
9144+
9145+#ifdef CONFIG_PAX_REFCOUNT
9146+" tvs %%icc, 6\n"
9147+#endif
9148+
9149 " cas [%2], %0, %1\n"
9150 " cmp %0, %1\n"
9151 " bne,pn %%icc, 1b\n"
9152@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9153 return tmp1;
9154 }
9155
9156-static void inline arch_read_unlock(arch_rwlock_t *lock)
9157+static inline void arch_read_unlock(arch_rwlock_t *lock)
9158 {
9159 unsigned long tmp1, tmp2;
9160
9161 __asm__ __volatile__(
9162 "1: lduw [%2], %0\n"
9163-" sub %0, 1, %1\n"
9164+" subcc %0, 1, %1\n"
9165+
9166+#ifdef CONFIG_PAX_REFCOUNT
9167+" tvs %%icc, 6\n"
9168+#endif
9169+
9170 " cas [%2], %0, %1\n"
9171 " cmp %0, %1\n"
9172 " bne,pn %%xcc, 1b\n"
9173@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9174 : "memory");
9175 }
9176
9177-static void inline arch_write_lock(arch_rwlock_t *lock)
9178+static inline void arch_write_lock(arch_rwlock_t *lock)
9179 {
9180 unsigned long mask, tmp1, tmp2;
9181
9182@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9183 : "memory");
9184 }
9185
9186-static void inline arch_write_unlock(arch_rwlock_t *lock)
9187+static inline void arch_write_unlock(arch_rwlock_t *lock)
9188 {
9189 __asm__ __volatile__(
9190 " stw %%g0, [%0]"
9191@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9192 : "memory");
9193 }
9194
9195-static int inline arch_write_trylock(arch_rwlock_t *lock)
9196+static inline int arch_write_trylock(arch_rwlock_t *lock)
9197 {
9198 unsigned long mask, tmp1, tmp2, result;
9199
9200diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9201index dd38075..e7cac83 100644
9202--- a/arch/sparc/include/asm/thread_info_32.h
9203+++ b/arch/sparc/include/asm/thread_info_32.h
9204@@ -49,6 +49,8 @@ struct thread_info {
9205 unsigned long w_saved;
9206
9207 struct restart_block restart_block;
9208+
9209+ unsigned long lowest_stack;
9210 };
9211
9212 /*
9213diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9214index d5e5042..9bfee76 100644
9215--- a/arch/sparc/include/asm/thread_info_64.h
9216+++ b/arch/sparc/include/asm/thread_info_64.h
9217@@ -63,6 +63,8 @@ struct thread_info {
9218 struct pt_regs *kern_una_regs;
9219 unsigned int kern_una_insn;
9220
9221+ unsigned long lowest_stack;
9222+
9223 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9224 };
9225
9226@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
9227 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9228 /* flag bit 6 is available */
9229 #define TIF_32BIT 7 /* 32-bit binary */
9230-/* flag bit 8 is available */
9231+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
9232 #define TIF_SECCOMP 9 /* secure computing */
9233 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9234 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9235+
9236 /* NOTE: Thread flags >= 12 should be ones we have no interest
9237 * in using in assembly, else we can't use the mask as
9238 * an immediate value in instructions such as andcc.
9239@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9240 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9241 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9242 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9243+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9244
9245 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9246 _TIF_DO_NOTIFY_RESUME_MASK | \
9247 _TIF_NEED_RESCHED)
9248 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9249
9250+#define _TIF_WORK_SYSCALL \
9251+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9252+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
9253+
9254+
9255 /*
9256 * Thread-synchronous status.
9257 *
9258diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9259index 0167d26..767bb0c 100644
9260--- a/arch/sparc/include/asm/uaccess.h
9261+++ b/arch/sparc/include/asm/uaccess.h
9262@@ -1,5 +1,6 @@
9263 #ifndef ___ASM_SPARC_UACCESS_H
9264 #define ___ASM_SPARC_UACCESS_H
9265+
9266 #if defined(__sparc__) && defined(__arch64__)
9267 #include <asm/uaccess_64.h>
9268 #else
9269diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9270index 53a28dd..50c38c3 100644
9271--- a/arch/sparc/include/asm/uaccess_32.h
9272+++ b/arch/sparc/include/asm/uaccess_32.h
9273@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9274
9275 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9276 {
9277- if (n && __access_ok((unsigned long) to, n))
9278+ if ((long)n < 0)
9279+ return n;
9280+
9281+ if (n && __access_ok((unsigned long) to, n)) {
9282+ if (!__builtin_constant_p(n))
9283+ check_object_size(from, n, true);
9284 return __copy_user(to, (__force void __user *) from, n);
9285- else
9286+ } else
9287 return n;
9288 }
9289
9290 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9291 {
9292+ if ((long)n < 0)
9293+ return n;
9294+
9295+ if (!__builtin_constant_p(n))
9296+ check_object_size(from, n, true);
9297+
9298 return __copy_user(to, (__force void __user *) from, n);
9299 }
9300
9301 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9302 {
9303- if (n && __access_ok((unsigned long) from, n))
9304+ if ((long)n < 0)
9305+ return n;
9306+
9307+ if (n && __access_ok((unsigned long) from, n)) {
9308+ if (!__builtin_constant_p(n))
9309+ check_object_size(to, n, false);
9310 return __copy_user((__force void __user *) to, from, n);
9311- else
9312+ } else
9313 return n;
9314 }
9315
9316 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
9317 {
9318+ if ((long)n < 0)
9319+ return n;
9320+
9321 return __copy_user((__force void __user *) to, from, n);
9322 }
9323
9324diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
9325index e562d3c..191f176 100644
9326--- a/arch/sparc/include/asm/uaccess_64.h
9327+++ b/arch/sparc/include/asm/uaccess_64.h
9328@@ -10,6 +10,7 @@
9329 #include <linux/compiler.h>
9330 #include <linux/string.h>
9331 #include <linux/thread_info.h>
9332+#include <linux/kernel.h>
9333 #include <asm/asi.h>
9334 #include <asm/spitfire.h>
9335 #include <asm-generic/uaccess-unaligned.h>
9336@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
9337 static inline unsigned long __must_check
9338 copy_from_user(void *to, const void __user *from, unsigned long size)
9339 {
9340- unsigned long ret = ___copy_from_user(to, from, size);
9341+ unsigned long ret;
9342
9343+ if ((long)size < 0 || size > INT_MAX)
9344+ return size;
9345+
9346+ if (!__builtin_constant_p(size))
9347+ check_object_size(to, size, false);
9348+
9349+ ret = ___copy_from_user(to, from, size);
9350 if (unlikely(ret))
9351 ret = copy_from_user_fixup(to, from, size);
9352
9353@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
9354 static inline unsigned long __must_check
9355 copy_to_user(void __user *to, const void *from, unsigned long size)
9356 {
9357- unsigned long ret = ___copy_to_user(to, from, size);
9358+ unsigned long ret;
9359
9360+ if ((long)size < 0 || size > INT_MAX)
9361+ return size;
9362+
9363+ if (!__builtin_constant_p(size))
9364+ check_object_size(from, size, true);
9365+
9366+ ret = ___copy_to_user(to, from, size);
9367 if (unlikely(ret))
9368 ret = copy_to_user_fixup(to, from, size);
9369 return ret;
9370diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
9371index d432fb2..6056af1 100644
9372--- a/arch/sparc/kernel/Makefile
9373+++ b/arch/sparc/kernel/Makefile
9374@@ -3,7 +3,7 @@
9375 #
9376
9377 asflags-y := -ansi
9378-ccflags-y := -Werror
9379+#ccflags-y := -Werror
9380
9381 extra-y := head_$(BITS).o
9382
9383diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
9384index 5ef48da..11d460f 100644
9385--- a/arch/sparc/kernel/ds.c
9386+++ b/arch/sparc/kernel/ds.c
9387@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
9388 char *base, *p;
9389 int msg_len, loops;
9390
9391+ if (strlen(var) + strlen(value) + 2 >
9392+ sizeof(pkt) - sizeof(pkt.header)) {
9393+ printk(KERN_ERR PFX
9394+ "contents length: %zu, which more than max: %lu,"
9395+ "so could not set (%s) variable to (%s).\n",
9396+ strlen(var) + strlen(value) + 2,
9397+ sizeof(pkt) - sizeof(pkt.header), var, value);
9398+ return;
9399+ }
9400+
9401 memset(&pkt, 0, sizeof(pkt));
9402 pkt.header.data.tag.type = DS_DATA;
9403 pkt.header.data.handle = cp->handle;
9404diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
9405index fdd819d..5af08c8 100644
9406--- a/arch/sparc/kernel/process_32.c
9407+++ b/arch/sparc/kernel/process_32.c
9408@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
9409
9410 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
9411 r->psr, r->pc, r->npc, r->y, print_tainted());
9412- printk("PC: <%pS>\n", (void *) r->pc);
9413+ printk("PC: <%pA>\n", (void *) r->pc);
9414 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9415 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
9416 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
9417 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9418 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
9419 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
9420- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
9421+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
9422
9423 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9424 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
9425@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9426 rw = (struct reg_window32 *) fp;
9427 pc = rw->ins[7];
9428 printk("[%08lx : ", pc);
9429- printk("%pS ] ", (void *) pc);
9430+ printk("%pA ] ", (void *) pc);
9431 fp = rw->ins[6];
9432 } while (++count < 16);
9433 printk("\n");
9434diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
9435index baebab2..9cd13b1 100644
9436--- a/arch/sparc/kernel/process_64.c
9437+++ b/arch/sparc/kernel/process_64.c
9438@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
9439 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
9440 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
9441 if (regs->tstate & TSTATE_PRIV)
9442- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
9443+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
9444 }
9445
9446 void show_regs(struct pt_regs *regs)
9447@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
9448
9449 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
9450 regs->tpc, regs->tnpc, regs->y, print_tainted());
9451- printk("TPC: <%pS>\n", (void *) regs->tpc);
9452+ printk("TPC: <%pA>\n", (void *) regs->tpc);
9453 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
9454 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
9455 regs->u_regs[3]);
9456@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
9457 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
9458 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
9459 regs->u_regs[15]);
9460- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
9461+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
9462 show_regwindow(regs);
9463 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
9464 }
9465@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
9466 ((tp && tp->task) ? tp->task->pid : -1));
9467
9468 if (gp->tstate & TSTATE_PRIV) {
9469- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
9470+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
9471 (void *) gp->tpc,
9472 (void *) gp->o7,
9473 (void *) gp->i7,
9474diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
9475index 79cc0d1..ec62734 100644
9476--- a/arch/sparc/kernel/prom_common.c
9477+++ b/arch/sparc/kernel/prom_common.c
9478@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
9479
9480 unsigned int prom_early_allocated __initdata;
9481
9482-static struct of_pdt_ops prom_sparc_ops __initdata = {
9483+static struct of_pdt_ops prom_sparc_ops __initconst = {
9484 .nextprop = prom_common_nextprop,
9485 .getproplen = prom_getproplen,
9486 .getproperty = prom_getproperty,
9487diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
9488index 7ff45e4..a58f271 100644
9489--- a/arch/sparc/kernel/ptrace_64.c
9490+++ b/arch/sparc/kernel/ptrace_64.c
9491@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
9492 return ret;
9493 }
9494
9495+#ifdef CONFIG_GRKERNSEC_SETXID
9496+extern void gr_delayed_cred_worker(void);
9497+#endif
9498+
9499 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9500 {
9501 int ret = 0;
9502@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9503 /* do the secure computing check first */
9504 secure_computing_strict(regs->u_regs[UREG_G1]);
9505
9506+#ifdef CONFIG_GRKERNSEC_SETXID
9507+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9508+ gr_delayed_cred_worker();
9509+#endif
9510+
9511 if (test_thread_flag(TIF_SYSCALL_TRACE))
9512 ret = tracehook_report_syscall_entry(regs);
9513
9514@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9515
9516 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
9517 {
9518+#ifdef CONFIG_GRKERNSEC_SETXID
9519+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9520+ gr_delayed_cred_worker();
9521+#endif
9522+
9523 audit_syscall_exit(regs);
9524
9525 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9526diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
9527index 3a8d184..49498a8 100644
9528--- a/arch/sparc/kernel/sys_sparc_32.c
9529+++ b/arch/sparc/kernel/sys_sparc_32.c
9530@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9531 if (len > TASK_SIZE - PAGE_SIZE)
9532 return -ENOMEM;
9533 if (!addr)
9534- addr = TASK_UNMAPPED_BASE;
9535+ addr = current->mm->mmap_base;
9536
9537 info.flags = 0;
9538 info.length = len;
9539diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
9540index 2daaaa6..4fb84dc 100644
9541--- a/arch/sparc/kernel/sys_sparc_64.c
9542+++ b/arch/sparc/kernel/sys_sparc_64.c
9543@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9544 struct vm_area_struct * vma;
9545 unsigned long task_size = TASK_SIZE;
9546 int do_color_align;
9547+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9548 struct vm_unmapped_area_info info;
9549
9550 if (flags & MAP_FIXED) {
9551 /* We do not accept a shared mapping if it would violate
9552 * cache aliasing constraints.
9553 */
9554- if ((flags & MAP_SHARED) &&
9555+ if ((filp || (flags & MAP_SHARED)) &&
9556 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9557 return -EINVAL;
9558 return addr;
9559@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9560 if (filp || (flags & MAP_SHARED))
9561 do_color_align = 1;
9562
9563+#ifdef CONFIG_PAX_RANDMMAP
9564+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9565+#endif
9566+
9567 if (addr) {
9568 if (do_color_align)
9569 addr = COLOR_ALIGN(addr, pgoff);
9570@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9571 addr = PAGE_ALIGN(addr);
9572
9573 vma = find_vma(mm, addr);
9574- if (task_size - len >= addr &&
9575- (!vma || addr + len <= vma->vm_start))
9576+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9577 return addr;
9578 }
9579
9580 info.flags = 0;
9581 info.length = len;
9582- info.low_limit = TASK_UNMAPPED_BASE;
9583+ info.low_limit = mm->mmap_base;
9584 info.high_limit = min(task_size, VA_EXCLUDE_START);
9585 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9586 info.align_offset = pgoff << PAGE_SHIFT;
9587+ info.threadstack_offset = offset;
9588 addr = vm_unmapped_area(&info);
9589
9590 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9591 VM_BUG_ON(addr != -ENOMEM);
9592 info.low_limit = VA_EXCLUDE_END;
9593+
9594+#ifdef CONFIG_PAX_RANDMMAP
9595+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9596+ info.low_limit += mm->delta_mmap;
9597+#endif
9598+
9599 info.high_limit = task_size;
9600 addr = vm_unmapped_area(&info);
9601 }
9602@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9603 unsigned long task_size = STACK_TOP32;
9604 unsigned long addr = addr0;
9605 int do_color_align;
9606+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9607 struct vm_unmapped_area_info info;
9608
9609 /* This should only ever run for 32-bit processes. */
9610@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9611 /* We do not accept a shared mapping if it would violate
9612 * cache aliasing constraints.
9613 */
9614- if ((flags & MAP_SHARED) &&
9615+ if ((filp || (flags & MAP_SHARED)) &&
9616 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9617 return -EINVAL;
9618 return addr;
9619@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9620 if (filp || (flags & MAP_SHARED))
9621 do_color_align = 1;
9622
9623+#ifdef CONFIG_PAX_RANDMMAP
9624+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9625+#endif
9626+
9627 /* requesting a specific address */
9628 if (addr) {
9629 if (do_color_align)
9630@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9631 addr = PAGE_ALIGN(addr);
9632
9633 vma = find_vma(mm, addr);
9634- if (task_size - len >= addr &&
9635- (!vma || addr + len <= vma->vm_start))
9636+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9637 return addr;
9638 }
9639
9640@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9641 info.high_limit = mm->mmap_base;
9642 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9643 info.align_offset = pgoff << PAGE_SHIFT;
9644+ info.threadstack_offset = offset;
9645 addr = vm_unmapped_area(&info);
9646
9647 /*
9648@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9649 VM_BUG_ON(addr != -ENOMEM);
9650 info.flags = 0;
9651 info.low_limit = TASK_UNMAPPED_BASE;
9652+
9653+#ifdef CONFIG_PAX_RANDMMAP
9654+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9655+ info.low_limit += mm->delta_mmap;
9656+#endif
9657+
9658 info.high_limit = STACK_TOP32;
9659 addr = vm_unmapped_area(&info);
9660 }
9661@@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
9662 EXPORT_SYMBOL(get_fb_unmapped_area);
9663
9664 /* Essentially the same as PowerPC. */
9665-static unsigned long mmap_rnd(void)
9666+static unsigned long mmap_rnd(struct mm_struct *mm)
9667 {
9668 unsigned long rnd = 0UL;
9669
9670+#ifdef CONFIG_PAX_RANDMMAP
9671+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9672+#endif
9673+
9674 if (current->flags & PF_RANDOMIZE) {
9675 unsigned long val = get_random_int();
9676 if (test_thread_flag(TIF_32BIT))
9677@@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
9678
9679 void arch_pick_mmap_layout(struct mm_struct *mm)
9680 {
9681- unsigned long random_factor = mmap_rnd();
9682+ unsigned long random_factor = mmap_rnd(mm);
9683 unsigned long gap;
9684
9685 /*
9686@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9687 gap == RLIM_INFINITY ||
9688 sysctl_legacy_va_layout) {
9689 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9690+
9691+#ifdef CONFIG_PAX_RANDMMAP
9692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9693+ mm->mmap_base += mm->delta_mmap;
9694+#endif
9695+
9696 mm->get_unmapped_area = arch_get_unmapped_area;
9697 mm->unmap_area = arch_unmap_area;
9698 } else {
9699@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9700 gap = (task_size / 6 * 5);
9701
9702 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9703+
9704+#ifdef CONFIG_PAX_RANDMMAP
9705+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9706+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9707+#endif
9708+
9709 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9710 mm->unmap_area = arch_unmap_area_topdown;
9711 }
9712diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9713index 22a1098..6255eb9 100644
9714--- a/arch/sparc/kernel/syscalls.S
9715+++ b/arch/sparc/kernel/syscalls.S
9716@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
9717 #endif
9718 .align 32
9719 1: ldx [%g6 + TI_FLAGS], %l5
9720- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9721+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9722 be,pt %icc, rtrap
9723 nop
9724 call syscall_trace_leave
9725@@ -184,7 +184,7 @@ linux_sparc_syscall32:
9726
9727 srl %i5, 0, %o5 ! IEU1
9728 srl %i2, 0, %o2 ! IEU0 Group
9729- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9730+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9731 bne,pn %icc, linux_syscall_trace32 ! CTI
9732 mov %i0, %l5 ! IEU1
9733 call %l7 ! CTI Group brk forced
9734@@ -207,7 +207,7 @@ linux_sparc_syscall:
9735
9736 mov %i3, %o3 ! IEU1
9737 mov %i4, %o4 ! IEU0 Group
9738- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9739+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9740 bne,pn %icc, linux_syscall_trace ! CTI Group
9741 mov %i0, %l5 ! IEU0
9742 2: call %l7 ! CTI Group brk forced
9743@@ -223,7 +223,7 @@ ret_sys_call:
9744
9745 cmp %o0, -ERESTART_RESTARTBLOCK
9746 bgeu,pn %xcc, 1f
9747- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9748+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9749 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9750
9751 2:
9752diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
9753index 654e8aa..45f431b 100644
9754--- a/arch/sparc/kernel/sysfs.c
9755+++ b/arch/sparc/kernel/sysfs.c
9756@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
9757 return NOTIFY_OK;
9758 }
9759
9760-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
9761+static struct notifier_block sysfs_cpu_nb = {
9762 .notifier_call = sysfs_cpu_notify,
9763 };
9764
9765diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9766index 6629829..036032d 100644
9767--- a/arch/sparc/kernel/traps_32.c
9768+++ b/arch/sparc/kernel/traps_32.c
9769@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9770 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9771 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9772
9773+extern void gr_handle_kernel_exploit(void);
9774+
9775 void die_if_kernel(char *str, struct pt_regs *regs)
9776 {
9777 static int die_counter;
9778@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9779 count++ < 30 &&
9780 (((unsigned long) rw) >= PAGE_OFFSET) &&
9781 !(((unsigned long) rw) & 0x7)) {
9782- printk("Caller[%08lx]: %pS\n", rw->ins[7],
9783+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
9784 (void *) rw->ins[7]);
9785 rw = (struct reg_window32 *)rw->ins[6];
9786 }
9787 }
9788 printk("Instruction DUMP:");
9789 instruction_dump ((unsigned long *) regs->pc);
9790- if(regs->psr & PSR_PS)
9791+ if(regs->psr & PSR_PS) {
9792+ gr_handle_kernel_exploit();
9793 do_exit(SIGKILL);
9794+ }
9795 do_exit(SIGSEGV);
9796 }
9797
9798diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9799index b3f833a..ac74b2d 100644
9800--- a/arch/sparc/kernel/traps_64.c
9801+++ b/arch/sparc/kernel/traps_64.c
9802@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9803 i + 1,
9804 p->trapstack[i].tstate, p->trapstack[i].tpc,
9805 p->trapstack[i].tnpc, p->trapstack[i].tt);
9806- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
9807+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
9808 }
9809 }
9810
9811@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
9812
9813 lvl -= 0x100;
9814 if (regs->tstate & TSTATE_PRIV) {
9815+
9816+#ifdef CONFIG_PAX_REFCOUNT
9817+ if (lvl == 6)
9818+ pax_report_refcount_overflow(regs);
9819+#endif
9820+
9821 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
9822 die_if_kernel(buffer, regs);
9823 }
9824@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
9825 void bad_trap_tl1(struct pt_regs *regs, long lvl)
9826 {
9827 char buffer[32];
9828-
9829+
9830 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
9831 0, lvl, SIGTRAP) == NOTIFY_STOP)
9832 return;
9833
9834+#ifdef CONFIG_PAX_REFCOUNT
9835+ if (lvl == 6)
9836+ pax_report_refcount_overflow(regs);
9837+#endif
9838+
9839 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
9840
9841 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
9842@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
9843 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
9844 printk("%s" "ERROR(%d): ",
9845 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
9846- printk("TPC<%pS>\n", (void *) regs->tpc);
9847+ printk("TPC<%pA>\n", (void *) regs->tpc);
9848 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
9849 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
9850 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
9851@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9852 smp_processor_id(),
9853 (type & 0x1) ? 'I' : 'D',
9854 regs->tpc);
9855- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
9856+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
9857 panic("Irrecoverable Cheetah+ parity error.");
9858 }
9859
9860@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9861 smp_processor_id(),
9862 (type & 0x1) ? 'I' : 'D',
9863 regs->tpc);
9864- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
9865+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
9866 }
9867
9868 struct sun4v_error_entry {
9869@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
9870
9871 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
9872 regs->tpc, tl);
9873- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
9874+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
9875 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9876- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
9877+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
9878 (void *) regs->u_regs[UREG_I7]);
9879 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
9880 "pte[%lx] error[%lx]\n",
9881@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
9882
9883 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
9884 regs->tpc, tl);
9885- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
9886+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
9887 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9888- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
9889+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
9890 (void *) regs->u_regs[UREG_I7]);
9891 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
9892 "pte[%lx] error[%lx]\n",
9893@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9894 fp = (unsigned long)sf->fp + STACK_BIAS;
9895 }
9896
9897- printk(" [%016lx] %pS\n", pc, (void *) pc);
9898+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9899 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9900 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
9901 int index = tsk->curr_ret_stack;
9902 if (tsk->ret_stack && index >= graph) {
9903 pc = tsk->ret_stack[index - graph].ret;
9904- printk(" [%016lx] %pS\n", pc, (void *) pc);
9905+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9906 graph++;
9907 }
9908 }
9909@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
9910 return (struct reg_window *) (fp + STACK_BIAS);
9911 }
9912
9913+extern void gr_handle_kernel_exploit(void);
9914+
9915 void die_if_kernel(char *str, struct pt_regs *regs)
9916 {
9917 static int die_counter;
9918@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9919 while (rw &&
9920 count++ < 30 &&
9921 kstack_valid(tp, (unsigned long) rw)) {
9922- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9923+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9924 (void *) rw->ins[7]);
9925
9926 rw = kernel_stack_up(rw);
9927@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9928 }
9929 user_instruction_dump ((unsigned int __user *) regs->tpc);
9930 }
9931- if (regs->tstate & TSTATE_PRIV)
9932+ if (regs->tstate & TSTATE_PRIV) {
9933+ gr_handle_kernel_exploit();
9934 do_exit(SIGKILL);
9935+ }
9936 do_exit(SIGSEGV);
9937 }
9938 EXPORT_SYMBOL(die_if_kernel);
9939diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9940index 8201c25e..072a2a7 100644
9941--- a/arch/sparc/kernel/unaligned_64.c
9942+++ b/arch/sparc/kernel/unaligned_64.c
9943@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9944 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9945
9946 if (__ratelimit(&ratelimit)) {
9947- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9948+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9949 regs->tpc, (void *) regs->tpc);
9950 }
9951 }
9952diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9953index dbe119b..089c7c1 100644
9954--- a/arch/sparc/lib/Makefile
9955+++ b/arch/sparc/lib/Makefile
9956@@ -2,7 +2,7 @@
9957 #
9958
9959 asflags-y := -ansi -DST_DIV0=0x02
9960-ccflags-y := -Werror
9961+#ccflags-y := -Werror
9962
9963 lib-$(CONFIG_SPARC32) += ashrdi3.o
9964 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9965diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9966index 85c233d..68500e0 100644
9967--- a/arch/sparc/lib/atomic_64.S
9968+++ b/arch/sparc/lib/atomic_64.S
9969@@ -17,7 +17,12 @@
9970 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9971 BACKOFF_SETUP(%o2)
9972 1: lduw [%o1], %g1
9973- add %g1, %o0, %g7
9974+ addcc %g1, %o0, %g7
9975+
9976+#ifdef CONFIG_PAX_REFCOUNT
9977+ tvs %icc, 6
9978+#endif
9979+
9980 cas [%o1], %g1, %g7
9981 cmp %g1, %g7
9982 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9983@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9984 2: BACKOFF_SPIN(%o2, %o3, 1b)
9985 ENDPROC(atomic_add)
9986
9987+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9988+ BACKOFF_SETUP(%o2)
9989+1: lduw [%o1], %g1
9990+ add %g1, %o0, %g7
9991+ cas [%o1], %g1, %g7
9992+ cmp %g1, %g7
9993+ bne,pn %icc, 2f
9994+ nop
9995+ retl
9996+ nop
9997+2: BACKOFF_SPIN(%o2, %o3, 1b)
9998+ENDPROC(atomic_add_unchecked)
9999+
10000 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10001 BACKOFF_SETUP(%o2)
10002 1: lduw [%o1], %g1
10003- sub %g1, %o0, %g7
10004+ subcc %g1, %o0, %g7
10005+
10006+#ifdef CONFIG_PAX_REFCOUNT
10007+ tvs %icc, 6
10008+#endif
10009+
10010 cas [%o1], %g1, %g7
10011 cmp %g1, %g7
10012 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10013@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10014 2: BACKOFF_SPIN(%o2, %o3, 1b)
10015 ENDPROC(atomic_sub)
10016
10017+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10018+ BACKOFF_SETUP(%o2)
10019+1: lduw [%o1], %g1
10020+ sub %g1, %o0, %g7
10021+ cas [%o1], %g1, %g7
10022+ cmp %g1, %g7
10023+ bne,pn %icc, 2f
10024+ nop
10025+ retl
10026+ nop
10027+2: BACKOFF_SPIN(%o2, %o3, 1b)
10028+ENDPROC(atomic_sub_unchecked)
10029+
10030 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10031 BACKOFF_SETUP(%o2)
10032 1: lduw [%o1], %g1
10033- add %g1, %o0, %g7
10034+ addcc %g1, %o0, %g7
10035+
10036+#ifdef CONFIG_PAX_REFCOUNT
10037+ tvs %icc, 6
10038+#endif
10039+
10040 cas [%o1], %g1, %g7
10041 cmp %g1, %g7
10042 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10043@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10044 2: BACKOFF_SPIN(%o2, %o3, 1b)
10045 ENDPROC(atomic_add_ret)
10046
10047+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10048+ BACKOFF_SETUP(%o2)
10049+1: lduw [%o1], %g1
10050+ addcc %g1, %o0, %g7
10051+ cas [%o1], %g1, %g7
10052+ cmp %g1, %g7
10053+ bne,pn %icc, 2f
10054+ add %g7, %o0, %g7
10055+ sra %g7, 0, %o0
10056+ retl
10057+ nop
10058+2: BACKOFF_SPIN(%o2, %o3, 1b)
10059+ENDPROC(atomic_add_ret_unchecked)
10060+
10061 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10062 BACKOFF_SETUP(%o2)
10063 1: lduw [%o1], %g1
10064- sub %g1, %o0, %g7
10065+ subcc %g1, %o0, %g7
10066+
10067+#ifdef CONFIG_PAX_REFCOUNT
10068+ tvs %icc, 6
10069+#endif
10070+
10071 cas [%o1], %g1, %g7
10072 cmp %g1, %g7
10073 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10074@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10075 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10076 BACKOFF_SETUP(%o2)
10077 1: ldx [%o1], %g1
10078- add %g1, %o0, %g7
10079+ addcc %g1, %o0, %g7
10080+
10081+#ifdef CONFIG_PAX_REFCOUNT
10082+ tvs %xcc, 6
10083+#endif
10084+
10085 casx [%o1], %g1, %g7
10086 cmp %g1, %g7
10087 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10088@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10089 2: BACKOFF_SPIN(%o2, %o3, 1b)
10090 ENDPROC(atomic64_add)
10091
10092+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10093+ BACKOFF_SETUP(%o2)
10094+1: ldx [%o1], %g1
10095+ addcc %g1, %o0, %g7
10096+ casx [%o1], %g1, %g7
10097+ cmp %g1, %g7
10098+ bne,pn %xcc, 2f
10099+ nop
10100+ retl
10101+ nop
10102+2: BACKOFF_SPIN(%o2, %o3, 1b)
10103+ENDPROC(atomic64_add_unchecked)
10104+
10105 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10106 BACKOFF_SETUP(%o2)
10107 1: ldx [%o1], %g1
10108- sub %g1, %o0, %g7
10109+ subcc %g1, %o0, %g7
10110+
10111+#ifdef CONFIG_PAX_REFCOUNT
10112+ tvs %xcc, 6
10113+#endif
10114+
10115 casx [%o1], %g1, %g7
10116 cmp %g1, %g7
10117 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10118@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10119 2: BACKOFF_SPIN(%o2, %o3, 1b)
10120 ENDPROC(atomic64_sub)
10121
10122+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10123+ BACKOFF_SETUP(%o2)
10124+1: ldx [%o1], %g1
10125+ subcc %g1, %o0, %g7
10126+ casx [%o1], %g1, %g7
10127+ cmp %g1, %g7
10128+ bne,pn %xcc, 2f
10129+ nop
10130+ retl
10131+ nop
10132+2: BACKOFF_SPIN(%o2, %o3, 1b)
10133+ENDPROC(atomic64_sub_unchecked)
10134+
10135 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10136 BACKOFF_SETUP(%o2)
10137 1: ldx [%o1], %g1
10138- add %g1, %o0, %g7
10139+ addcc %g1, %o0, %g7
10140+
10141+#ifdef CONFIG_PAX_REFCOUNT
10142+ tvs %xcc, 6
10143+#endif
10144+
10145 casx [%o1], %g1, %g7
10146 cmp %g1, %g7
10147 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10148@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10149 2: BACKOFF_SPIN(%o2, %o3, 1b)
10150 ENDPROC(atomic64_add_ret)
10151
10152+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10153+ BACKOFF_SETUP(%o2)
10154+1: ldx [%o1], %g1
10155+ addcc %g1, %o0, %g7
10156+ casx [%o1], %g1, %g7
10157+ cmp %g1, %g7
10158+ bne,pn %xcc, 2f
10159+ add %g7, %o0, %g7
10160+ mov %g7, %o0
10161+ retl
10162+ nop
10163+2: BACKOFF_SPIN(%o2, %o3, 1b)
10164+ENDPROC(atomic64_add_ret_unchecked)
10165+
10166 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10167 BACKOFF_SETUP(%o2)
10168 1: ldx [%o1], %g1
10169- sub %g1, %o0, %g7
10170+ subcc %g1, %o0, %g7
10171+
10172+#ifdef CONFIG_PAX_REFCOUNT
10173+ tvs %xcc, 6
10174+#endif
10175+
10176 casx [%o1], %g1, %g7
10177 cmp %g1, %g7
10178 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10179diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10180index 0c4e35e..745d3e4 100644
10181--- a/arch/sparc/lib/ksyms.c
10182+++ b/arch/sparc/lib/ksyms.c
10183@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
10184
10185 /* Atomic counter implementation. */
10186 EXPORT_SYMBOL(atomic_add);
10187+EXPORT_SYMBOL(atomic_add_unchecked);
10188 EXPORT_SYMBOL(atomic_add_ret);
10189+EXPORT_SYMBOL(atomic_add_ret_unchecked);
10190 EXPORT_SYMBOL(atomic_sub);
10191+EXPORT_SYMBOL(atomic_sub_unchecked);
10192 EXPORT_SYMBOL(atomic_sub_ret);
10193 EXPORT_SYMBOL(atomic64_add);
10194+EXPORT_SYMBOL(atomic64_add_unchecked);
10195 EXPORT_SYMBOL(atomic64_add_ret);
10196+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10197 EXPORT_SYMBOL(atomic64_sub);
10198+EXPORT_SYMBOL(atomic64_sub_unchecked);
10199 EXPORT_SYMBOL(atomic64_sub_ret);
10200 EXPORT_SYMBOL(atomic64_dec_if_positive);
10201
10202diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10203index 30c3ecc..736f015 100644
10204--- a/arch/sparc/mm/Makefile
10205+++ b/arch/sparc/mm/Makefile
10206@@ -2,7 +2,7 @@
10207 #
10208
10209 asflags-y := -ansi
10210-ccflags-y := -Werror
10211+#ccflags-y := -Werror
10212
10213 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10214 obj-y += fault_$(BITS).o
10215diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10216index e98bfda..ea8d221 100644
10217--- a/arch/sparc/mm/fault_32.c
10218+++ b/arch/sparc/mm/fault_32.c
10219@@ -21,6 +21,9 @@
10220 #include <linux/perf_event.h>
10221 #include <linux/interrupt.h>
10222 #include <linux/kdebug.h>
10223+#include <linux/slab.h>
10224+#include <linux/pagemap.h>
10225+#include <linux/compiler.h>
10226
10227 #include <asm/page.h>
10228 #include <asm/pgtable.h>
10229@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
10230 return safe_compute_effective_address(regs, insn);
10231 }
10232
10233+#ifdef CONFIG_PAX_PAGEEXEC
10234+#ifdef CONFIG_PAX_DLRESOLVE
10235+static void pax_emuplt_close(struct vm_area_struct *vma)
10236+{
10237+ vma->vm_mm->call_dl_resolve = 0UL;
10238+}
10239+
10240+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10241+{
10242+ unsigned int *kaddr;
10243+
10244+ vmf->page = alloc_page(GFP_HIGHUSER);
10245+ if (!vmf->page)
10246+ return VM_FAULT_OOM;
10247+
10248+ kaddr = kmap(vmf->page);
10249+ memset(kaddr, 0, PAGE_SIZE);
10250+ kaddr[0] = 0x9DE3BFA8U; /* save */
10251+ flush_dcache_page(vmf->page);
10252+ kunmap(vmf->page);
10253+ return VM_FAULT_MAJOR;
10254+}
10255+
10256+static const struct vm_operations_struct pax_vm_ops = {
10257+ .close = pax_emuplt_close,
10258+ .fault = pax_emuplt_fault
10259+};
10260+
10261+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10262+{
10263+ int ret;
10264+
10265+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10266+ vma->vm_mm = current->mm;
10267+ vma->vm_start = addr;
10268+ vma->vm_end = addr + PAGE_SIZE;
10269+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10270+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10271+ vma->vm_ops = &pax_vm_ops;
10272+
10273+ ret = insert_vm_struct(current->mm, vma);
10274+ if (ret)
10275+ return ret;
10276+
10277+ ++current->mm->total_vm;
10278+ return 0;
10279+}
10280+#endif
10281+
10282+/*
10283+ * PaX: decide what to do with offenders (regs->pc = fault address)
10284+ *
10285+ * returns 1 when task should be killed
10286+ * 2 when patched PLT trampoline was detected
10287+ * 3 when unpatched PLT trampoline was detected
10288+ */
10289+static int pax_handle_fetch_fault(struct pt_regs *regs)
10290+{
10291+
10292+#ifdef CONFIG_PAX_EMUPLT
10293+ int err;
10294+
10295+ do { /* PaX: patched PLT emulation #1 */
10296+ unsigned int sethi1, sethi2, jmpl;
10297+
10298+ err = get_user(sethi1, (unsigned int *)regs->pc);
10299+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
10300+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
10301+
10302+ if (err)
10303+ break;
10304+
10305+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10306+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10307+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10308+ {
10309+ unsigned int addr;
10310+
10311+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10312+ addr = regs->u_regs[UREG_G1];
10313+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10314+ regs->pc = addr;
10315+ regs->npc = addr+4;
10316+ return 2;
10317+ }
10318+ } while (0);
10319+
10320+ do { /* PaX: patched PLT emulation #2 */
10321+ unsigned int ba;
10322+
10323+ err = get_user(ba, (unsigned int *)regs->pc);
10324+
10325+ if (err)
10326+ break;
10327+
10328+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10329+ unsigned int addr;
10330+
10331+ if ((ba & 0xFFC00000U) == 0x30800000U)
10332+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10333+ else
10334+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10335+ regs->pc = addr;
10336+ regs->npc = addr+4;
10337+ return 2;
10338+ }
10339+ } while (0);
10340+
10341+ do { /* PaX: patched PLT emulation #3 */
10342+ unsigned int sethi, bajmpl, nop;
10343+
10344+ err = get_user(sethi, (unsigned int *)regs->pc);
10345+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
10346+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10347+
10348+ if (err)
10349+ break;
10350+
10351+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10352+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10353+ nop == 0x01000000U)
10354+ {
10355+ unsigned int addr;
10356+
10357+ addr = (sethi & 0x003FFFFFU) << 10;
10358+ regs->u_regs[UREG_G1] = addr;
10359+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10360+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10361+ else
10362+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10363+ regs->pc = addr;
10364+ regs->npc = addr+4;
10365+ return 2;
10366+ }
10367+ } while (0);
10368+
10369+ do { /* PaX: unpatched PLT emulation step 1 */
10370+ unsigned int sethi, ba, nop;
10371+
10372+ err = get_user(sethi, (unsigned int *)regs->pc);
10373+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
10374+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10375+
10376+ if (err)
10377+ break;
10378+
10379+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10380+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10381+ nop == 0x01000000U)
10382+ {
10383+ unsigned int addr, save, call;
10384+
10385+ if ((ba & 0xFFC00000U) == 0x30800000U)
10386+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10387+ else
10388+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10389+
10390+ err = get_user(save, (unsigned int *)addr);
10391+ err |= get_user(call, (unsigned int *)(addr+4));
10392+ err |= get_user(nop, (unsigned int *)(addr+8));
10393+ if (err)
10394+ break;
10395+
10396+#ifdef CONFIG_PAX_DLRESOLVE
10397+ if (save == 0x9DE3BFA8U &&
10398+ (call & 0xC0000000U) == 0x40000000U &&
10399+ nop == 0x01000000U)
10400+ {
10401+ struct vm_area_struct *vma;
10402+ unsigned long call_dl_resolve;
10403+
10404+ down_read(&current->mm->mmap_sem);
10405+ call_dl_resolve = current->mm->call_dl_resolve;
10406+ up_read(&current->mm->mmap_sem);
10407+ if (likely(call_dl_resolve))
10408+ goto emulate;
10409+
10410+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10411+
10412+ down_write(&current->mm->mmap_sem);
10413+ if (current->mm->call_dl_resolve) {
10414+ call_dl_resolve = current->mm->call_dl_resolve;
10415+ up_write(&current->mm->mmap_sem);
10416+ if (vma)
10417+ kmem_cache_free(vm_area_cachep, vma);
10418+ goto emulate;
10419+ }
10420+
10421+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10422+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10423+ up_write(&current->mm->mmap_sem);
10424+ if (vma)
10425+ kmem_cache_free(vm_area_cachep, vma);
10426+ return 1;
10427+ }
10428+
10429+ if (pax_insert_vma(vma, call_dl_resolve)) {
10430+ up_write(&current->mm->mmap_sem);
10431+ kmem_cache_free(vm_area_cachep, vma);
10432+ return 1;
10433+ }
10434+
10435+ current->mm->call_dl_resolve = call_dl_resolve;
10436+ up_write(&current->mm->mmap_sem);
10437+
10438+emulate:
10439+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10440+ regs->pc = call_dl_resolve;
10441+ regs->npc = addr+4;
10442+ return 3;
10443+ }
10444+#endif
10445+
10446+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10447+ if ((save & 0xFFC00000U) == 0x05000000U &&
10448+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10449+ nop == 0x01000000U)
10450+ {
10451+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10452+ regs->u_regs[UREG_G2] = addr + 4;
10453+ addr = (save & 0x003FFFFFU) << 10;
10454+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10455+ regs->pc = addr;
10456+ regs->npc = addr+4;
10457+ return 3;
10458+ }
10459+ }
10460+ } while (0);
10461+
10462+ do { /* PaX: unpatched PLT emulation step 2 */
10463+ unsigned int save, call, nop;
10464+
10465+ err = get_user(save, (unsigned int *)(regs->pc-4));
10466+ err |= get_user(call, (unsigned int *)regs->pc);
10467+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
10468+ if (err)
10469+ break;
10470+
10471+ if (save == 0x9DE3BFA8U &&
10472+ (call & 0xC0000000U) == 0x40000000U &&
10473+ nop == 0x01000000U)
10474+ {
10475+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
10476+
10477+ regs->u_regs[UREG_RETPC] = regs->pc;
10478+ regs->pc = dl_resolve;
10479+ regs->npc = dl_resolve+4;
10480+ return 3;
10481+ }
10482+ } while (0);
10483+#endif
10484+
10485+ return 1;
10486+}
10487+
10488+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10489+{
10490+ unsigned long i;
10491+
10492+ printk(KERN_ERR "PAX: bytes at PC: ");
10493+ for (i = 0; i < 8; i++) {
10494+ unsigned int c;
10495+ if (get_user(c, (unsigned int *)pc+i))
10496+ printk(KERN_CONT "???????? ");
10497+ else
10498+ printk(KERN_CONT "%08x ", c);
10499+ }
10500+ printk("\n");
10501+}
10502+#endif
10503+
10504 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
10505 int text_fault)
10506 {
10507@@ -230,6 +504,24 @@ good_area:
10508 if (!(vma->vm_flags & VM_WRITE))
10509 goto bad_area;
10510 } else {
10511+
10512+#ifdef CONFIG_PAX_PAGEEXEC
10513+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
10514+ up_read(&mm->mmap_sem);
10515+ switch (pax_handle_fetch_fault(regs)) {
10516+
10517+#ifdef CONFIG_PAX_EMUPLT
10518+ case 2:
10519+ case 3:
10520+ return;
10521+#endif
10522+
10523+ }
10524+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
10525+ do_group_exit(SIGKILL);
10526+ }
10527+#endif
10528+
10529 /* Allow reads even for write-only mappings */
10530 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
10531 goto bad_area;
10532diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
10533index 5062ff3..e0b75f3 100644
10534--- a/arch/sparc/mm/fault_64.c
10535+++ b/arch/sparc/mm/fault_64.c
10536@@ -21,6 +21,9 @@
10537 #include <linux/kprobes.h>
10538 #include <linux/kdebug.h>
10539 #include <linux/percpu.h>
10540+#include <linux/slab.h>
10541+#include <linux/pagemap.h>
10542+#include <linux/compiler.h>
10543
10544 #include <asm/page.h>
10545 #include <asm/pgtable.h>
10546@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
10547 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
10548 regs->tpc);
10549 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
10550- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
10551+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
10552 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
10553 dump_stack();
10554 unhandled_fault(regs->tpc, current, regs);
10555@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
10556 show_regs(regs);
10557 }
10558
10559+#ifdef CONFIG_PAX_PAGEEXEC
10560+#ifdef CONFIG_PAX_DLRESOLVE
10561+static void pax_emuplt_close(struct vm_area_struct *vma)
10562+{
10563+ vma->vm_mm->call_dl_resolve = 0UL;
10564+}
10565+
10566+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10567+{
10568+ unsigned int *kaddr;
10569+
10570+ vmf->page = alloc_page(GFP_HIGHUSER);
10571+ if (!vmf->page)
10572+ return VM_FAULT_OOM;
10573+
10574+ kaddr = kmap(vmf->page);
10575+ memset(kaddr, 0, PAGE_SIZE);
10576+ kaddr[0] = 0x9DE3BFA8U; /* save */
10577+ flush_dcache_page(vmf->page);
10578+ kunmap(vmf->page);
10579+ return VM_FAULT_MAJOR;
10580+}
10581+
10582+static const struct vm_operations_struct pax_vm_ops = {
10583+ .close = pax_emuplt_close,
10584+ .fault = pax_emuplt_fault
10585+};
10586+
10587+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10588+{
10589+ int ret;
10590+
10591+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10592+ vma->vm_mm = current->mm;
10593+ vma->vm_start = addr;
10594+ vma->vm_end = addr + PAGE_SIZE;
10595+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10596+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10597+ vma->vm_ops = &pax_vm_ops;
10598+
10599+ ret = insert_vm_struct(current->mm, vma);
10600+ if (ret)
10601+ return ret;
10602+
10603+ ++current->mm->total_vm;
10604+ return 0;
10605+}
10606+#endif
10607+
10608+/*
10609+ * PaX: decide what to do with offenders (regs->tpc = fault address)
10610+ *
10611+ * returns 1 when task should be killed
10612+ * 2 when patched PLT trampoline was detected
10613+ * 3 when unpatched PLT trampoline was detected
10614+ */
10615+static int pax_handle_fetch_fault(struct pt_regs *regs)
10616+{
10617+
10618+#ifdef CONFIG_PAX_EMUPLT
10619+ int err;
10620+
10621+ do { /* PaX: patched PLT emulation #1 */
10622+ unsigned int sethi1, sethi2, jmpl;
10623+
10624+ err = get_user(sethi1, (unsigned int *)regs->tpc);
10625+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
10626+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
10627+
10628+ if (err)
10629+ break;
10630+
10631+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10632+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10633+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10634+ {
10635+ unsigned long addr;
10636+
10637+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10638+ addr = regs->u_regs[UREG_G1];
10639+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10640+
10641+ if (test_thread_flag(TIF_32BIT))
10642+ addr &= 0xFFFFFFFFUL;
10643+
10644+ regs->tpc = addr;
10645+ regs->tnpc = addr+4;
10646+ return 2;
10647+ }
10648+ } while (0);
10649+
10650+ do { /* PaX: patched PLT emulation #2 */
10651+ unsigned int ba;
10652+
10653+ err = get_user(ba, (unsigned int *)regs->tpc);
10654+
10655+ if (err)
10656+ break;
10657+
10658+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10659+ unsigned long addr;
10660+
10661+ if ((ba & 0xFFC00000U) == 0x30800000U)
10662+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10663+ else
10664+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10665+
10666+ if (test_thread_flag(TIF_32BIT))
10667+ addr &= 0xFFFFFFFFUL;
10668+
10669+ regs->tpc = addr;
10670+ regs->tnpc = addr+4;
10671+ return 2;
10672+ }
10673+ } while (0);
10674+
10675+ do { /* PaX: patched PLT emulation #3 */
10676+ unsigned int sethi, bajmpl, nop;
10677+
10678+ err = get_user(sethi, (unsigned int *)regs->tpc);
10679+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
10680+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10681+
10682+ if (err)
10683+ break;
10684+
10685+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10686+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10687+ nop == 0x01000000U)
10688+ {
10689+ unsigned long addr;
10690+
10691+ addr = (sethi & 0x003FFFFFU) << 10;
10692+ regs->u_regs[UREG_G1] = addr;
10693+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10694+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10695+ else
10696+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10697+
10698+ if (test_thread_flag(TIF_32BIT))
10699+ addr &= 0xFFFFFFFFUL;
10700+
10701+ regs->tpc = addr;
10702+ regs->tnpc = addr+4;
10703+ return 2;
10704+ }
10705+ } while (0);
10706+
10707+ do { /* PaX: patched PLT emulation #4 */
10708+ unsigned int sethi, mov1, call, mov2;
10709+
10710+ err = get_user(sethi, (unsigned int *)regs->tpc);
10711+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
10712+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
10713+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
10714+
10715+ if (err)
10716+ break;
10717+
10718+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10719+ mov1 == 0x8210000FU &&
10720+ (call & 0xC0000000U) == 0x40000000U &&
10721+ mov2 == 0x9E100001U)
10722+ {
10723+ unsigned long addr;
10724+
10725+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10726+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10727+
10728+ if (test_thread_flag(TIF_32BIT))
10729+ addr &= 0xFFFFFFFFUL;
10730+
10731+ regs->tpc = addr;
10732+ regs->tnpc = addr+4;
10733+ return 2;
10734+ }
10735+ } while (0);
10736+
10737+ do { /* PaX: patched PLT emulation #5 */
10738+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
10739+
10740+ err = get_user(sethi, (unsigned int *)regs->tpc);
10741+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10742+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10743+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
10744+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
10745+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
10746+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
10747+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
10748+
10749+ if (err)
10750+ break;
10751+
10752+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10753+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10754+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10755+ (or1 & 0xFFFFE000U) == 0x82106000U &&
10756+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10757+ sllx == 0x83287020U &&
10758+ jmpl == 0x81C04005U &&
10759+ nop == 0x01000000U)
10760+ {
10761+ unsigned long addr;
10762+
10763+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10764+ regs->u_regs[UREG_G1] <<= 32;
10765+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10766+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10767+ regs->tpc = addr;
10768+ regs->tnpc = addr+4;
10769+ return 2;
10770+ }
10771+ } while (0);
10772+
10773+ do { /* PaX: patched PLT emulation #6 */
10774+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
10775+
10776+ err = get_user(sethi, (unsigned int *)regs->tpc);
10777+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10778+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10779+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
10780+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
10781+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
10782+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
10783+
10784+ if (err)
10785+ break;
10786+
10787+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10788+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10789+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10790+ sllx == 0x83287020U &&
10791+ (or & 0xFFFFE000U) == 0x8A116000U &&
10792+ jmpl == 0x81C04005U &&
10793+ nop == 0x01000000U)
10794+ {
10795+ unsigned long addr;
10796+
10797+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
10798+ regs->u_regs[UREG_G1] <<= 32;
10799+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
10800+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10801+ regs->tpc = addr;
10802+ regs->tnpc = addr+4;
10803+ return 2;
10804+ }
10805+ } while (0);
10806+
10807+ do { /* PaX: unpatched PLT emulation step 1 */
10808+ unsigned int sethi, ba, nop;
10809+
10810+ err = get_user(sethi, (unsigned int *)regs->tpc);
10811+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10812+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10813+
10814+ if (err)
10815+ break;
10816+
10817+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10818+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10819+ nop == 0x01000000U)
10820+ {
10821+ unsigned long addr;
10822+ unsigned int save, call;
10823+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
10824+
10825+ if ((ba & 0xFFC00000U) == 0x30800000U)
10826+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10827+ else
10828+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10829+
10830+ if (test_thread_flag(TIF_32BIT))
10831+ addr &= 0xFFFFFFFFUL;
10832+
10833+ err = get_user(save, (unsigned int *)addr);
10834+ err |= get_user(call, (unsigned int *)(addr+4));
10835+ err |= get_user(nop, (unsigned int *)(addr+8));
10836+ if (err)
10837+ break;
10838+
10839+#ifdef CONFIG_PAX_DLRESOLVE
10840+ if (save == 0x9DE3BFA8U &&
10841+ (call & 0xC0000000U) == 0x40000000U &&
10842+ nop == 0x01000000U)
10843+ {
10844+ struct vm_area_struct *vma;
10845+ unsigned long call_dl_resolve;
10846+
10847+ down_read(&current->mm->mmap_sem);
10848+ call_dl_resolve = current->mm->call_dl_resolve;
10849+ up_read(&current->mm->mmap_sem);
10850+ if (likely(call_dl_resolve))
10851+ goto emulate;
10852+
10853+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10854+
10855+ down_write(&current->mm->mmap_sem);
10856+ if (current->mm->call_dl_resolve) {
10857+ call_dl_resolve = current->mm->call_dl_resolve;
10858+ up_write(&current->mm->mmap_sem);
10859+ if (vma)
10860+ kmem_cache_free(vm_area_cachep, vma);
10861+ goto emulate;
10862+ }
10863+
10864+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10865+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10866+ up_write(&current->mm->mmap_sem);
10867+ if (vma)
10868+ kmem_cache_free(vm_area_cachep, vma);
10869+ return 1;
10870+ }
10871+
10872+ if (pax_insert_vma(vma, call_dl_resolve)) {
10873+ up_write(&current->mm->mmap_sem);
10874+ kmem_cache_free(vm_area_cachep, vma);
10875+ return 1;
10876+ }
10877+
10878+ current->mm->call_dl_resolve = call_dl_resolve;
10879+ up_write(&current->mm->mmap_sem);
10880+
10881+emulate:
10882+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10883+ regs->tpc = call_dl_resolve;
10884+ regs->tnpc = addr+4;
10885+ return 3;
10886+ }
10887+#endif
10888+
10889+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10890+ if ((save & 0xFFC00000U) == 0x05000000U &&
10891+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10892+ nop == 0x01000000U)
10893+ {
10894+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10895+ regs->u_regs[UREG_G2] = addr + 4;
10896+ addr = (save & 0x003FFFFFU) << 10;
10897+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10898+
10899+ if (test_thread_flag(TIF_32BIT))
10900+ addr &= 0xFFFFFFFFUL;
10901+
10902+ regs->tpc = addr;
10903+ regs->tnpc = addr+4;
10904+ return 3;
10905+ }
10906+
10907+ /* PaX: 64-bit PLT stub */
10908+ err = get_user(sethi1, (unsigned int *)addr);
10909+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10910+ err |= get_user(or1, (unsigned int *)(addr+8));
10911+ err |= get_user(or2, (unsigned int *)(addr+12));
10912+ err |= get_user(sllx, (unsigned int *)(addr+16));
10913+ err |= get_user(add, (unsigned int *)(addr+20));
10914+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10915+ err |= get_user(nop, (unsigned int *)(addr+28));
10916+ if (err)
10917+ break;
10918+
10919+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10920+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10921+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10922+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10923+ sllx == 0x89293020U &&
10924+ add == 0x8A010005U &&
10925+ jmpl == 0x89C14000U &&
10926+ nop == 0x01000000U)
10927+ {
10928+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10929+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10930+ regs->u_regs[UREG_G4] <<= 32;
10931+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10932+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10933+ regs->u_regs[UREG_G4] = addr + 24;
10934+ addr = regs->u_regs[UREG_G5];
10935+ regs->tpc = addr;
10936+ regs->tnpc = addr+4;
10937+ return 3;
10938+ }
10939+ }
10940+ } while (0);
10941+
10942+#ifdef CONFIG_PAX_DLRESOLVE
10943+ do { /* PaX: unpatched PLT emulation step 2 */
10944+ unsigned int save, call, nop;
10945+
10946+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10947+ err |= get_user(call, (unsigned int *)regs->tpc);
10948+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10949+ if (err)
10950+ break;
10951+
10952+ if (save == 0x9DE3BFA8U &&
10953+ (call & 0xC0000000U) == 0x40000000U &&
10954+ nop == 0x01000000U)
10955+ {
10956+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10957+
10958+ if (test_thread_flag(TIF_32BIT))
10959+ dl_resolve &= 0xFFFFFFFFUL;
10960+
10961+ regs->u_regs[UREG_RETPC] = regs->tpc;
10962+ regs->tpc = dl_resolve;
10963+ regs->tnpc = dl_resolve+4;
10964+ return 3;
10965+ }
10966+ } while (0);
10967+#endif
10968+
10969+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10970+ unsigned int sethi, ba, nop;
10971+
10972+ err = get_user(sethi, (unsigned int *)regs->tpc);
10973+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10974+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10975+
10976+ if (err)
10977+ break;
10978+
10979+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10980+ (ba & 0xFFF00000U) == 0x30600000U &&
10981+ nop == 0x01000000U)
10982+ {
10983+ unsigned long addr;
10984+
10985+ addr = (sethi & 0x003FFFFFU) << 10;
10986+ regs->u_regs[UREG_G1] = addr;
10987+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10988+
10989+ if (test_thread_flag(TIF_32BIT))
10990+ addr &= 0xFFFFFFFFUL;
10991+
10992+ regs->tpc = addr;
10993+ regs->tnpc = addr+4;
10994+ return 2;
10995+ }
10996+ } while (0);
10997+
10998+#endif
10999+
11000+ return 1;
11001+}
11002+
11003+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11004+{
11005+ unsigned long i;
11006+
11007+ printk(KERN_ERR "PAX: bytes at PC: ");
11008+ for (i = 0; i < 8; i++) {
11009+ unsigned int c;
11010+ if (get_user(c, (unsigned int *)pc+i))
11011+ printk(KERN_CONT "???????? ");
11012+ else
11013+ printk(KERN_CONT "%08x ", c);
11014+ }
11015+ printk("\n");
11016+}
11017+#endif
11018+
11019 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11020 {
11021 struct mm_struct *mm = current->mm;
11022@@ -341,6 +804,29 @@ retry:
11023 if (!vma)
11024 goto bad_area;
11025
11026+#ifdef CONFIG_PAX_PAGEEXEC
11027+ /* PaX: detect ITLB misses on non-exec pages */
11028+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11029+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11030+ {
11031+ if (address != regs->tpc)
11032+ goto good_area;
11033+
11034+ up_read(&mm->mmap_sem);
11035+ switch (pax_handle_fetch_fault(regs)) {
11036+
11037+#ifdef CONFIG_PAX_EMUPLT
11038+ case 2:
11039+ case 3:
11040+ return;
11041+#endif
11042+
11043+ }
11044+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11045+ do_group_exit(SIGKILL);
11046+ }
11047+#endif
11048+
11049 /* Pure DTLB misses do not tell us whether the fault causing
11050 * load/store/atomic was a write or not, it only says that there
11051 * was no match. So in such a case we (carefully) read the
11052diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11053index d2b5944..d878f3c 100644
11054--- a/arch/sparc/mm/hugetlbpage.c
11055+++ b/arch/sparc/mm/hugetlbpage.c
11056@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11057 unsigned long addr,
11058 unsigned long len,
11059 unsigned long pgoff,
11060- unsigned long flags)
11061+ unsigned long flags,
11062+ unsigned long offset)
11063 {
11064 unsigned long task_size = TASK_SIZE;
11065 struct vm_unmapped_area_info info;
11066@@ -38,15 +39,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11067
11068 info.flags = 0;
11069 info.length = len;
11070- info.low_limit = TASK_UNMAPPED_BASE;
11071+ info.low_limit = mm->mmap_base;
11072 info.high_limit = min(task_size, VA_EXCLUDE_START);
11073 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11074 info.align_offset = 0;
11075+ info.threadstack_offset = offset;
11076 addr = vm_unmapped_area(&info);
11077
11078 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11079 VM_BUG_ON(addr != -ENOMEM);
11080 info.low_limit = VA_EXCLUDE_END;
11081+
11082+#ifdef CONFIG_PAX_RANDMMAP
11083+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11084+ info.low_limit += mm->delta_mmap;
11085+#endif
11086+
11087 info.high_limit = task_size;
11088 addr = vm_unmapped_area(&info);
11089 }
11090@@ -58,7 +66,8 @@ static unsigned long
11091 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11092 const unsigned long len,
11093 const unsigned long pgoff,
11094- const unsigned long flags)
11095+ const unsigned long flags,
11096+ const unsigned long offset)
11097 {
11098 struct mm_struct *mm = current->mm;
11099 unsigned long addr = addr0;
11100@@ -73,6 +82,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11101 info.high_limit = mm->mmap_base;
11102 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11103 info.align_offset = 0;
11104+ info.threadstack_offset = offset;
11105 addr = vm_unmapped_area(&info);
11106
11107 /*
11108@@ -85,6 +95,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11109 VM_BUG_ON(addr != -ENOMEM);
11110 info.flags = 0;
11111 info.low_limit = TASK_UNMAPPED_BASE;
11112+
11113+#ifdef CONFIG_PAX_RANDMMAP
11114+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11115+ info.low_limit += mm->delta_mmap;
11116+#endif
11117+
11118 info.high_limit = STACK_TOP32;
11119 addr = vm_unmapped_area(&info);
11120 }
11121@@ -99,6 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11122 struct mm_struct *mm = current->mm;
11123 struct vm_area_struct *vma;
11124 unsigned long task_size = TASK_SIZE;
11125+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11126
11127 if (test_thread_flag(TIF_32BIT))
11128 task_size = STACK_TOP32;
11129@@ -114,19 +131,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11130 return addr;
11131 }
11132
11133+#ifdef CONFIG_PAX_RANDMMAP
11134+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11135+#endif
11136+
11137 if (addr) {
11138 addr = ALIGN(addr, HPAGE_SIZE);
11139 vma = find_vma(mm, addr);
11140- if (task_size - len >= addr &&
11141- (!vma || addr + len <= vma->vm_start))
11142+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11143 return addr;
11144 }
11145 if (mm->get_unmapped_area == arch_get_unmapped_area)
11146 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11147- pgoff, flags);
11148+ pgoff, flags, offset);
11149 else
11150 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11151- pgoff, flags);
11152+ pgoff, flags, offset);
11153 }
11154
11155 pte_t *huge_pte_alloc(struct mm_struct *mm,
11156diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11157index f4500c6..889656c 100644
11158--- a/arch/tile/include/asm/atomic_64.h
11159+++ b/arch/tile/include/asm/atomic_64.h
11160@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11161
11162 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11163
11164+#define atomic64_read_unchecked(v) atomic64_read(v)
11165+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11166+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11167+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11168+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11169+#define atomic64_inc_unchecked(v) atomic64_inc(v)
11170+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11171+#define atomic64_dec_unchecked(v) atomic64_dec(v)
11172+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11173+
11174 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11175 #define smp_mb__before_atomic_dec() smp_mb()
11176 #define smp_mb__after_atomic_dec() smp_mb()
11177diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
11178index a9a5299..0fce79e 100644
11179--- a/arch/tile/include/asm/cache.h
11180+++ b/arch/tile/include/asm/cache.h
11181@@ -15,11 +15,12 @@
11182 #ifndef _ASM_TILE_CACHE_H
11183 #define _ASM_TILE_CACHE_H
11184
11185+#include <linux/const.h>
11186 #include <arch/chip.h>
11187
11188 /* bytes per L1 data cache line */
11189 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
11190-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11191+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11192
11193 /* bytes per L2 cache line */
11194 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
11195diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
11196index 8a082bc..7a6bf87 100644
11197--- a/arch/tile/include/asm/uaccess.h
11198+++ b/arch/tile/include/asm/uaccess.h
11199@@ -408,9 +408,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
11200 const void __user *from,
11201 unsigned long n)
11202 {
11203- int sz = __compiletime_object_size(to);
11204+ size_t sz = __compiletime_object_size(to);
11205
11206- if (likely(sz == -1 || sz >= n))
11207+ if (likely(sz == (size_t)-1 || sz >= n))
11208 n = _copy_from_user(to, from, n);
11209 else
11210 copy_from_user_overflow();
11211diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
11212index 650ccff..45fe2d6 100644
11213--- a/arch/tile/mm/hugetlbpage.c
11214+++ b/arch/tile/mm/hugetlbpage.c
11215@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
11216 info.high_limit = TASK_SIZE;
11217 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11218 info.align_offset = 0;
11219+ info.threadstack_offset = 0;
11220 return vm_unmapped_area(&info);
11221 }
11222
11223@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
11224 info.high_limit = current->mm->mmap_base;
11225 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11226 info.align_offset = 0;
11227+ info.threadstack_offset = 0;
11228 addr = vm_unmapped_area(&info);
11229
11230 /*
11231diff --git a/arch/um/Makefile b/arch/um/Makefile
11232index 133f7de..1d6f2f1 100644
11233--- a/arch/um/Makefile
11234+++ b/arch/um/Makefile
11235@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
11236 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
11237 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
11238
11239+ifdef CONSTIFY_PLUGIN
11240+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11241+endif
11242+
11243 #This will adjust *FLAGS accordingly to the platform.
11244 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
11245
11246diff --git a/arch/um/defconfig b/arch/um/defconfig
11247index 08107a7..ab22afe 100644
11248--- a/arch/um/defconfig
11249+++ b/arch/um/defconfig
11250@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
11251 CONFIG_X86_L1_CACHE_SHIFT=5
11252 CONFIG_X86_XADD=y
11253 CONFIG_X86_PPRO_FENCE=y
11254-CONFIG_X86_WP_WORKS_OK=y
11255 CONFIG_X86_INVLPG=y
11256 CONFIG_X86_BSWAP=y
11257 CONFIG_X86_POPAD_OK=y
11258diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
11259index 19e1bdd..3665b77 100644
11260--- a/arch/um/include/asm/cache.h
11261+++ b/arch/um/include/asm/cache.h
11262@@ -1,6 +1,7 @@
11263 #ifndef __UM_CACHE_H
11264 #define __UM_CACHE_H
11265
11266+#include <linux/const.h>
11267
11268 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
11269 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11270@@ -12,6 +13,6 @@
11271 # define L1_CACHE_SHIFT 5
11272 #endif
11273
11274-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11275+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11276
11277 #endif
11278diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
11279index 2e0a6b1..a64d0f5 100644
11280--- a/arch/um/include/asm/kmap_types.h
11281+++ b/arch/um/include/asm/kmap_types.h
11282@@ -8,6 +8,6 @@
11283
11284 /* No more #include "asm/arch/kmap_types.h" ! */
11285
11286-#define KM_TYPE_NR 14
11287+#define KM_TYPE_NR 15
11288
11289 #endif
11290diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
11291index 5ff53d9..5850cdf 100644
11292--- a/arch/um/include/asm/page.h
11293+++ b/arch/um/include/asm/page.h
11294@@ -14,6 +14,9 @@
11295 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
11296 #define PAGE_MASK (~(PAGE_SIZE-1))
11297
11298+#define ktla_ktva(addr) (addr)
11299+#define ktva_ktla(addr) (addr)
11300+
11301 #ifndef __ASSEMBLY__
11302
11303 struct page;
11304diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
11305index 0032f92..cd151e0 100644
11306--- a/arch/um/include/asm/pgtable-3level.h
11307+++ b/arch/um/include/asm/pgtable-3level.h
11308@@ -58,6 +58,7 @@
11309 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
11310 #define pud_populate(mm, pud, pmd) \
11311 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
11312+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
11313
11314 #ifdef CONFIG_64BIT
11315 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
11316diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
11317index bbcef52..6a2a483 100644
11318--- a/arch/um/kernel/process.c
11319+++ b/arch/um/kernel/process.c
11320@@ -367,22 +367,6 @@ int singlestepping(void * t)
11321 return 2;
11322 }
11323
11324-/*
11325- * Only x86 and x86_64 have an arch_align_stack().
11326- * All other arches have "#define arch_align_stack(x) (x)"
11327- * in their asm/system.h
11328- * As this is included in UML from asm-um/system-generic.h,
11329- * we can use it to behave as the subarch does.
11330- */
11331-#ifndef arch_align_stack
11332-unsigned long arch_align_stack(unsigned long sp)
11333-{
11334- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
11335- sp -= get_random_int() % 8192;
11336- return sp & ~0xf;
11337-}
11338-#endif
11339-
11340 unsigned long get_wchan(struct task_struct *p)
11341 {
11342 unsigned long stack_page, sp, ip;
11343diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
11344index ad8f795..2c7eec6 100644
11345--- a/arch/unicore32/include/asm/cache.h
11346+++ b/arch/unicore32/include/asm/cache.h
11347@@ -12,8 +12,10 @@
11348 #ifndef __UNICORE_CACHE_H__
11349 #define __UNICORE_CACHE_H__
11350
11351-#define L1_CACHE_SHIFT (5)
11352-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11353+#include <linux/const.h>
11354+
11355+#define L1_CACHE_SHIFT 5
11356+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11357
11358 /*
11359 * Memory returned by kmalloc() may be used for DMA, so we must make
11360diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
11361index fe120da..24177f7 100644
11362--- a/arch/x86/Kconfig
11363+++ b/arch/x86/Kconfig
11364@@ -239,7 +239,7 @@ config X86_HT
11365
11366 config X86_32_LAZY_GS
11367 def_bool y
11368- depends on X86_32 && !CC_STACKPROTECTOR
11369+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11370
11371 config ARCH_HWEIGHT_CFLAGS
11372 string
11373@@ -1073,6 +1073,7 @@ config MICROCODE_EARLY
11374
11375 config X86_MSR
11376 tristate "/dev/cpu/*/msr - Model-specific register support"
11377+ depends on !GRKERNSEC_KMEM
11378 ---help---
11379 This device gives privileged processes access to the x86
11380 Model-Specific Registers (MSRs). It is a character device with
11381@@ -1096,7 +1097,7 @@ choice
11382
11383 config NOHIGHMEM
11384 bool "off"
11385- depends on !X86_NUMAQ
11386+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11387 ---help---
11388 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11389 However, the address space of 32-bit x86 processors is only 4
11390@@ -1133,7 +1134,7 @@ config NOHIGHMEM
11391
11392 config HIGHMEM4G
11393 bool "4GB"
11394- depends on !X86_NUMAQ
11395+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11396 ---help---
11397 Select this if you have a 32-bit processor and between 1 and 4
11398 gigabytes of physical RAM.
11399@@ -1186,7 +1187,7 @@ config PAGE_OFFSET
11400 hex
11401 default 0xB0000000 if VMSPLIT_3G_OPT
11402 default 0x80000000 if VMSPLIT_2G
11403- default 0x78000000 if VMSPLIT_2G_OPT
11404+ default 0x70000000 if VMSPLIT_2G_OPT
11405 default 0x40000000 if VMSPLIT_1G
11406 default 0xC0000000
11407 depends on X86_32
11408@@ -1584,6 +1585,7 @@ config SECCOMP
11409
11410 config CC_STACKPROTECTOR
11411 bool "Enable -fstack-protector buffer overflow detection"
11412+ depends on X86_64 || !PAX_MEMORY_UDEREF
11413 ---help---
11414 This option turns on the -fstack-protector GCC feature. This
11415 feature puts, at the beginning of functions, a canary value on
11416@@ -1703,6 +1705,8 @@ config X86_NEED_RELOCS
11417 config PHYSICAL_ALIGN
11418 hex "Alignment value to which kernel should be aligned" if X86_32
11419 default "0x1000000"
11420+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
11421+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
11422 range 0x2000 0x1000000
11423 ---help---
11424 This value puts the alignment restrictions on physical address
11425@@ -1778,9 +1782,10 @@ config DEBUG_HOTPLUG_CPU0
11426 If unsure, say N.
11427
11428 config COMPAT_VDSO
11429- def_bool y
11430+ def_bool n
11431 prompt "Compat VDSO support"
11432 depends on X86_32 || IA32_EMULATION
11433+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
11434 ---help---
11435 Map the 32-bit VDSO to the predictable old-style address too.
11436
11437diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
11438index c026cca..14657ae 100644
11439--- a/arch/x86/Kconfig.cpu
11440+++ b/arch/x86/Kconfig.cpu
11441@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
11442
11443 config X86_F00F_BUG
11444 def_bool y
11445- depends on M586MMX || M586TSC || M586 || M486
11446+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
11447
11448 config X86_INVD_BUG
11449 def_bool y
11450@@ -327,7 +327,7 @@ config X86_INVD_BUG
11451
11452 config X86_ALIGNMENT_16
11453 def_bool y
11454- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11455+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11456
11457 config X86_INTEL_USERCOPY
11458 def_bool y
11459@@ -373,7 +373,7 @@ config X86_CMPXCHG64
11460 # generates cmov.
11461 config X86_CMOV
11462 def_bool y
11463- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11464+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11465
11466 config X86_MINIMUM_CPU_FAMILY
11467 int
11468diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
11469index c198b7e..63eea60 100644
11470--- a/arch/x86/Kconfig.debug
11471+++ b/arch/x86/Kconfig.debug
11472@@ -84,7 +84,7 @@ config X86_PTDUMP
11473 config DEBUG_RODATA
11474 bool "Write protect kernel read-only data structures"
11475 default y
11476- depends on DEBUG_KERNEL
11477+ depends on DEBUG_KERNEL && BROKEN
11478 ---help---
11479 Mark the kernel read-only data as write-protected in the pagetables,
11480 in order to catch accidental (and incorrect) writes to such const
11481@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
11482
11483 config DEBUG_SET_MODULE_RONX
11484 bool "Set loadable kernel module data as NX and text as RO"
11485- depends on MODULES
11486+ depends on MODULES && BROKEN
11487 ---help---
11488 This option helps catch unintended modifications to loadable
11489 kernel module's text and read-only data. It also prevents execution
11490diff --git a/arch/x86/Makefile b/arch/x86/Makefile
11491index 5c47726..8c4fa67 100644
11492--- a/arch/x86/Makefile
11493+++ b/arch/x86/Makefile
11494@@ -54,6 +54,7 @@ else
11495 UTS_MACHINE := x86_64
11496 CHECKFLAGS += -D__x86_64__ -m64
11497
11498+ biarch := $(call cc-option,-m64)
11499 KBUILD_AFLAGS += -m64
11500 KBUILD_CFLAGS += -m64
11501
11502@@ -234,3 +235,12 @@ define archhelp
11503 echo ' FDARGS="..." arguments for the booted kernel'
11504 echo ' FDINITRD=file initrd for the booted kernel'
11505 endef
11506+
11507+define OLD_LD
11508+
11509+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
11510+*** Please upgrade your binutils to 2.18 or newer
11511+endef
11512+
11513+archprepare:
11514+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
11515diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
11516index 379814b..add62ce 100644
11517--- a/arch/x86/boot/Makefile
11518+++ b/arch/x86/boot/Makefile
11519@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
11520 $(call cc-option, -fno-stack-protector) \
11521 $(call cc-option, -mpreferred-stack-boundary=2)
11522 KBUILD_CFLAGS += $(call cc-option, -m32)
11523+ifdef CONSTIFY_PLUGIN
11524+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11525+endif
11526 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11527 GCOV_PROFILE := n
11528
11529diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
11530index 878e4b9..20537ab 100644
11531--- a/arch/x86/boot/bitops.h
11532+++ b/arch/x86/boot/bitops.h
11533@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11534 u8 v;
11535 const u32 *p = (const u32 *)addr;
11536
11537- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11538+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11539 return v;
11540 }
11541
11542@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11543
11544 static inline void set_bit(int nr, void *addr)
11545 {
11546- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11547+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11548 }
11549
11550 #endif /* BOOT_BITOPS_H */
11551diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
11552index 5b75319..331a4ca 100644
11553--- a/arch/x86/boot/boot.h
11554+++ b/arch/x86/boot/boot.h
11555@@ -85,7 +85,7 @@ static inline void io_delay(void)
11556 static inline u16 ds(void)
11557 {
11558 u16 seg;
11559- asm("movw %%ds,%0" : "=rm" (seg));
11560+ asm volatile("movw %%ds,%0" : "=rm" (seg));
11561 return seg;
11562 }
11563
11564@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
11565 static inline int memcmp(const void *s1, const void *s2, size_t len)
11566 {
11567 u8 diff;
11568- asm("repe; cmpsb; setnz %0"
11569+ asm volatile("repe; cmpsb; setnz %0"
11570 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
11571 return diff;
11572 }
11573diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
11574index 5ef205c..342191d 100644
11575--- a/arch/x86/boot/compressed/Makefile
11576+++ b/arch/x86/boot/compressed/Makefile
11577@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
11578 KBUILD_CFLAGS += $(cflags-y)
11579 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
11580 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
11581+ifdef CONSTIFY_PLUGIN
11582+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11583+endif
11584
11585 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11586 GCOV_PROFILE := n
11587diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
11588index d606463..b887794 100644
11589--- a/arch/x86/boot/compressed/eboot.c
11590+++ b/arch/x86/boot/compressed/eboot.c
11591@@ -150,7 +150,6 @@ again:
11592 *addr = max_addr;
11593 }
11594
11595-free_pool:
11596 efi_call_phys1(sys_table->boottime->free_pool, map);
11597
11598 fail:
11599@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
11600 if (i == map_size / desc_size)
11601 status = EFI_NOT_FOUND;
11602
11603-free_pool:
11604 efi_call_phys1(sys_table->boottime->free_pool, map);
11605 fail:
11606 return status;
11607diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
11608index a53440e..c3dbf1e 100644
11609--- a/arch/x86/boot/compressed/efi_stub_32.S
11610+++ b/arch/x86/boot/compressed/efi_stub_32.S
11611@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
11612 * parameter 2, ..., param n. To make things easy, we save the return
11613 * address of efi_call_phys in a global variable.
11614 */
11615- popl %ecx
11616- movl %ecx, saved_return_addr(%edx)
11617- /* get the function pointer into ECX*/
11618- popl %ecx
11619- movl %ecx, efi_rt_function_ptr(%edx)
11620+ popl saved_return_addr(%edx)
11621+ popl efi_rt_function_ptr(%edx)
11622
11623 /*
11624 * 3. Call the physical function.
11625 */
11626- call *%ecx
11627+ call *efi_rt_function_ptr(%edx)
11628
11629 /*
11630 * 4. Balance the stack. And because EAX contain the return value,
11631@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
11632 1: popl %edx
11633 subl $1b, %edx
11634
11635- movl efi_rt_function_ptr(%edx), %ecx
11636- pushl %ecx
11637+ pushl efi_rt_function_ptr(%edx)
11638
11639 /*
11640 * 10. Push the saved return address onto the stack and return.
11641 */
11642- movl saved_return_addr(%edx), %ecx
11643- pushl %ecx
11644- ret
11645+ jmpl *saved_return_addr(%edx)
11646 ENDPROC(efi_call_phys)
11647 .previous
11648
11649diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
11650index 1e3184f..0d11e2e 100644
11651--- a/arch/x86/boot/compressed/head_32.S
11652+++ b/arch/x86/boot/compressed/head_32.S
11653@@ -118,7 +118,7 @@ preferred_addr:
11654 notl %eax
11655 andl %eax, %ebx
11656 #else
11657- movl $LOAD_PHYSICAL_ADDR, %ebx
11658+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11659 #endif
11660
11661 /* Target address to relocate to for decompression */
11662@@ -204,7 +204,7 @@ relocated:
11663 * and where it was actually loaded.
11664 */
11665 movl %ebp, %ebx
11666- subl $LOAD_PHYSICAL_ADDR, %ebx
11667+ subl $____LOAD_PHYSICAL_ADDR, %ebx
11668 jz 2f /* Nothing to be done if loaded at compiled addr. */
11669 /*
11670 * Process relocations.
11671@@ -212,8 +212,7 @@ relocated:
11672
11673 1: subl $4, %edi
11674 movl (%edi), %ecx
11675- testl %ecx, %ecx
11676- jz 2f
11677+ jecxz 2f
11678 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
11679 jmp 1b
11680 2:
11681diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
11682index 16f24e6..47491a3 100644
11683--- a/arch/x86/boot/compressed/head_64.S
11684+++ b/arch/x86/boot/compressed/head_64.S
11685@@ -97,7 +97,7 @@ ENTRY(startup_32)
11686 notl %eax
11687 andl %eax, %ebx
11688 #else
11689- movl $LOAD_PHYSICAL_ADDR, %ebx
11690+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11691 #endif
11692
11693 /* Target address to relocate to for decompression */
11694@@ -272,7 +272,7 @@ preferred_addr:
11695 notq %rax
11696 andq %rax, %rbp
11697 #else
11698- movq $LOAD_PHYSICAL_ADDR, %rbp
11699+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11700 #endif
11701
11702 /* Target address to relocate to for decompression */
11703@@ -363,8 +363,8 @@ gdt:
11704 .long gdt
11705 .word 0
11706 .quad 0x0000000000000000 /* NULL descriptor */
11707- .quad 0x00af9a000000ffff /* __KERNEL_CS */
11708- .quad 0x00cf92000000ffff /* __KERNEL_DS */
11709+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
11710+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
11711 .quad 0x0080890000000000 /* TS descriptor */
11712 .quad 0x0000000000000000 /* TS continued */
11713 gdt_end:
11714diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11715index 7cb56c6..d382d84 100644
11716--- a/arch/x86/boot/compressed/misc.c
11717+++ b/arch/x86/boot/compressed/misc.c
11718@@ -303,7 +303,7 @@ static void parse_elf(void *output)
11719 case PT_LOAD:
11720 #ifdef CONFIG_RELOCATABLE
11721 dest = output;
11722- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
11723+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
11724 #else
11725 dest = (void *)(phdr->p_paddr);
11726 #endif
11727@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
11728 error("Destination address too large");
11729 #endif
11730 #ifndef CONFIG_RELOCATABLE
11731- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
11732+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
11733 error("Wrong destination address");
11734 #endif
11735
11736diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
11737index 4d3ff03..e4972ff 100644
11738--- a/arch/x86/boot/cpucheck.c
11739+++ b/arch/x86/boot/cpucheck.c
11740@@ -74,7 +74,7 @@ static int has_fpu(void)
11741 u16 fcw = -1, fsw = -1;
11742 u32 cr0;
11743
11744- asm("movl %%cr0,%0" : "=r" (cr0));
11745+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
11746 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
11747 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
11748 asm volatile("movl %0,%%cr0" : : "r" (cr0));
11749@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
11750 {
11751 u32 f0, f1;
11752
11753- asm("pushfl ; "
11754+ asm volatile("pushfl ; "
11755 "pushfl ; "
11756 "popl %0 ; "
11757 "movl %0,%1 ; "
11758@@ -115,7 +115,7 @@ static void get_flags(void)
11759 set_bit(X86_FEATURE_FPU, cpu.flags);
11760
11761 if (has_eflag(X86_EFLAGS_ID)) {
11762- asm("cpuid"
11763+ asm volatile("cpuid"
11764 : "=a" (max_intel_level),
11765 "=b" (cpu_vendor[0]),
11766 "=d" (cpu_vendor[1]),
11767@@ -124,7 +124,7 @@ static void get_flags(void)
11768
11769 if (max_intel_level >= 0x00000001 &&
11770 max_intel_level <= 0x0000ffff) {
11771- asm("cpuid"
11772+ asm volatile("cpuid"
11773 : "=a" (tfms),
11774 "=c" (cpu.flags[4]),
11775 "=d" (cpu.flags[0])
11776@@ -136,7 +136,7 @@ static void get_flags(void)
11777 cpu.model += ((tfms >> 16) & 0xf) << 4;
11778 }
11779
11780- asm("cpuid"
11781+ asm volatile("cpuid"
11782 : "=a" (max_amd_level)
11783 : "a" (0x80000000)
11784 : "ebx", "ecx", "edx");
11785@@ -144,7 +144,7 @@ static void get_flags(void)
11786 if (max_amd_level >= 0x80000001 &&
11787 max_amd_level <= 0x8000ffff) {
11788 u32 eax = 0x80000001;
11789- asm("cpuid"
11790+ asm volatile("cpuid"
11791 : "+a" (eax),
11792 "=c" (cpu.flags[6]),
11793 "=d" (cpu.flags[1])
11794@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11795 u32 ecx = MSR_K7_HWCR;
11796 u32 eax, edx;
11797
11798- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11799+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11800 eax &= ~(1 << 15);
11801- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11802+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11803
11804 get_flags(); /* Make sure it really did something */
11805 err = check_flags();
11806@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11807 u32 ecx = MSR_VIA_FCR;
11808 u32 eax, edx;
11809
11810- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11811+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11812 eax |= (1<<1)|(1<<7);
11813- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11814+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11815
11816 set_bit(X86_FEATURE_CX8, cpu.flags);
11817 err = check_flags();
11818@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11819 u32 eax, edx;
11820 u32 level = 1;
11821
11822- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11823- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11824- asm("cpuid"
11825+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11826+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11827+ asm volatile("cpuid"
11828 : "+a" (level), "=d" (cpu.flags[0])
11829 : : "ecx", "ebx");
11830- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11831+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11832
11833 err = check_flags();
11834 }
11835diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
11836index 9ec06a1..2c25e79 100644
11837--- a/arch/x86/boot/header.S
11838+++ b/arch/x86/boot/header.S
11839@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
11840 # single linked list of
11841 # struct setup_data
11842
11843-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
11844+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
11845
11846 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
11847+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11848+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
11849+#else
11850 #define VO_INIT_SIZE (VO__end - VO__text)
11851+#endif
11852 #if ZO_INIT_SIZE > VO_INIT_SIZE
11853 #define INIT_SIZE ZO_INIT_SIZE
11854 #else
11855diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
11856index db75d07..8e6d0af 100644
11857--- a/arch/x86/boot/memory.c
11858+++ b/arch/x86/boot/memory.c
11859@@ -19,7 +19,7 @@
11860
11861 static int detect_memory_e820(void)
11862 {
11863- int count = 0;
11864+ unsigned int count = 0;
11865 struct biosregs ireg, oreg;
11866 struct e820entry *desc = boot_params.e820_map;
11867 static struct e820entry buf; /* static so it is zeroed */
11868diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
11869index 11e8c6e..fdbb1ed 100644
11870--- a/arch/x86/boot/video-vesa.c
11871+++ b/arch/x86/boot/video-vesa.c
11872@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
11873
11874 boot_params.screen_info.vesapm_seg = oreg.es;
11875 boot_params.screen_info.vesapm_off = oreg.di;
11876+ boot_params.screen_info.vesapm_size = oreg.cx;
11877 }
11878
11879 /*
11880diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
11881index 43eda28..5ab5fdb 100644
11882--- a/arch/x86/boot/video.c
11883+++ b/arch/x86/boot/video.c
11884@@ -96,7 +96,7 @@ static void store_mode_params(void)
11885 static unsigned int get_entry(void)
11886 {
11887 char entry_buf[4];
11888- int i, len = 0;
11889+ unsigned int i, len = 0;
11890 int key;
11891 unsigned int v;
11892
11893diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
11894index 9105655..5e37f27 100644
11895--- a/arch/x86/crypto/aes-x86_64-asm_64.S
11896+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
11897@@ -8,6 +8,8 @@
11898 * including this sentence is retained in full.
11899 */
11900
11901+#include <asm/alternative-asm.h>
11902+
11903 .extern crypto_ft_tab
11904 .extern crypto_it_tab
11905 .extern crypto_fl_tab
11906@@ -70,6 +72,8 @@
11907 je B192; \
11908 leaq 32(r9),r9;
11909
11910+#define ret pax_force_retaddr 0, 1; ret
11911+
11912 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
11913 movq r1,r2; \
11914 movq r3,r4; \
11915diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
11916index 477e9d7..3ab339f 100644
11917--- a/arch/x86/crypto/aesni-intel_asm.S
11918+++ b/arch/x86/crypto/aesni-intel_asm.S
11919@@ -31,6 +31,7 @@
11920
11921 #include <linux/linkage.h>
11922 #include <asm/inst.h>
11923+#include <asm/alternative-asm.h>
11924
11925 #ifdef __x86_64__
11926 .data
11927@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
11928 pop %r14
11929 pop %r13
11930 pop %r12
11931+ pax_force_retaddr 0, 1
11932 ret
11933 ENDPROC(aesni_gcm_dec)
11934
11935@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
11936 pop %r14
11937 pop %r13
11938 pop %r12
11939+ pax_force_retaddr 0, 1
11940 ret
11941 ENDPROC(aesni_gcm_enc)
11942
11943@@ -1722,6 +1725,7 @@ _key_expansion_256a:
11944 pxor %xmm1, %xmm0
11945 movaps %xmm0, (TKEYP)
11946 add $0x10, TKEYP
11947+ pax_force_retaddr_bts
11948 ret
11949 ENDPROC(_key_expansion_128)
11950 ENDPROC(_key_expansion_256a)
11951@@ -1748,6 +1752,7 @@ _key_expansion_192a:
11952 shufps $0b01001110, %xmm2, %xmm1
11953 movaps %xmm1, 0x10(TKEYP)
11954 add $0x20, TKEYP
11955+ pax_force_retaddr_bts
11956 ret
11957 ENDPROC(_key_expansion_192a)
11958
11959@@ -1768,6 +1773,7 @@ _key_expansion_192b:
11960
11961 movaps %xmm0, (TKEYP)
11962 add $0x10, TKEYP
11963+ pax_force_retaddr_bts
11964 ret
11965 ENDPROC(_key_expansion_192b)
11966
11967@@ -1781,6 +1787,7 @@ _key_expansion_256b:
11968 pxor %xmm1, %xmm2
11969 movaps %xmm2, (TKEYP)
11970 add $0x10, TKEYP
11971+ pax_force_retaddr_bts
11972 ret
11973 ENDPROC(_key_expansion_256b)
11974
11975@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
11976 #ifndef __x86_64__
11977 popl KEYP
11978 #endif
11979+ pax_force_retaddr 0, 1
11980 ret
11981 ENDPROC(aesni_set_key)
11982
11983@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
11984 popl KLEN
11985 popl KEYP
11986 #endif
11987+ pax_force_retaddr 0, 1
11988 ret
11989 ENDPROC(aesni_enc)
11990
11991@@ -1974,6 +1983,7 @@ _aesni_enc1:
11992 AESENC KEY STATE
11993 movaps 0x70(TKEYP), KEY
11994 AESENCLAST KEY STATE
11995+ pax_force_retaddr_bts
11996 ret
11997 ENDPROC(_aesni_enc1)
11998
11999@@ -2083,6 +2093,7 @@ _aesni_enc4:
12000 AESENCLAST KEY STATE2
12001 AESENCLAST KEY STATE3
12002 AESENCLAST KEY STATE4
12003+ pax_force_retaddr_bts
12004 ret
12005 ENDPROC(_aesni_enc4)
12006
12007@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
12008 popl KLEN
12009 popl KEYP
12010 #endif
12011+ pax_force_retaddr 0, 1
12012 ret
12013 ENDPROC(aesni_dec)
12014
12015@@ -2164,6 +2176,7 @@ _aesni_dec1:
12016 AESDEC KEY STATE
12017 movaps 0x70(TKEYP), KEY
12018 AESDECLAST KEY STATE
12019+ pax_force_retaddr_bts
12020 ret
12021 ENDPROC(_aesni_dec1)
12022
12023@@ -2273,6 +2286,7 @@ _aesni_dec4:
12024 AESDECLAST KEY STATE2
12025 AESDECLAST KEY STATE3
12026 AESDECLAST KEY STATE4
12027+ pax_force_retaddr_bts
12028 ret
12029 ENDPROC(_aesni_dec4)
12030
12031@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
12032 popl KEYP
12033 popl LEN
12034 #endif
12035+ pax_force_retaddr 0, 1
12036 ret
12037 ENDPROC(aesni_ecb_enc)
12038
12039@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
12040 popl KEYP
12041 popl LEN
12042 #endif
12043+ pax_force_retaddr 0, 1
12044 ret
12045 ENDPROC(aesni_ecb_dec)
12046
12047@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
12048 popl LEN
12049 popl IVP
12050 #endif
12051+ pax_force_retaddr 0, 1
12052 ret
12053 ENDPROC(aesni_cbc_enc)
12054
12055@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
12056 popl LEN
12057 popl IVP
12058 #endif
12059+ pax_force_retaddr 0, 1
12060 ret
12061 ENDPROC(aesni_cbc_dec)
12062
12063@@ -2550,6 +2568,7 @@ _aesni_inc_init:
12064 mov $1, TCTR_LOW
12065 MOVQ_R64_XMM TCTR_LOW INC
12066 MOVQ_R64_XMM CTR TCTR_LOW
12067+ pax_force_retaddr_bts
12068 ret
12069 ENDPROC(_aesni_inc_init)
12070
12071@@ -2579,6 +2598,7 @@ _aesni_inc:
12072 .Linc_low:
12073 movaps CTR, IV
12074 PSHUFB_XMM BSWAP_MASK IV
12075+ pax_force_retaddr_bts
12076 ret
12077 ENDPROC(_aesni_inc)
12078
12079@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
12080 .Lctr_enc_ret:
12081 movups IV, (IVP)
12082 .Lctr_enc_just_ret:
12083+ pax_force_retaddr 0, 1
12084 ret
12085 ENDPROC(aesni_ctr_enc)
12086
12087@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
12088 pxor INC, STATE4
12089 movdqu STATE4, 0x70(OUTP)
12090
12091+ pax_force_retaddr 0, 1
12092 ret
12093 ENDPROC(aesni_xts_crypt8)
12094
12095diff --git a/arch/x86/crypto/blowfish-avx2-asm_64.S b/arch/x86/crypto/blowfish-avx2-asm_64.S
12096index 784452e..46982c7 100644
12097--- a/arch/x86/crypto/blowfish-avx2-asm_64.S
12098+++ b/arch/x86/crypto/blowfish-avx2-asm_64.S
12099@@ -221,6 +221,7 @@ __blowfish_enc_blk32:
12100
12101 write_block(RXl, RXr);
12102
12103+ pax_force_retaddr 0, 1
12104 ret;
12105 ENDPROC(__blowfish_enc_blk32)
12106
12107@@ -250,6 +251,7 @@ __blowfish_dec_blk32:
12108
12109 write_block(RXl, RXr);
12110
12111+ pax_force_retaddr 0, 1
12112 ret;
12113 ENDPROC(__blowfish_dec_blk32)
12114
12115@@ -284,6 +286,7 @@ ENTRY(blowfish_ecb_enc_32way)
12116
12117 vzeroupper;
12118
12119+ pax_force_retaddr 0, 1
12120 ret;
12121 ENDPROC(blowfish_ecb_enc_32way)
12122
12123@@ -318,6 +321,7 @@ ENTRY(blowfish_ecb_dec_32way)
12124
12125 vzeroupper;
12126
12127+ pax_force_retaddr 0, 1
12128 ret;
12129 ENDPROC(blowfish_ecb_dec_32way)
12130
12131@@ -365,6 +369,7 @@ ENTRY(blowfish_cbc_dec_32way)
12132
12133 vzeroupper;
12134
12135+ pax_force_retaddr 0, 1
12136 ret;
12137 ENDPROC(blowfish_cbc_dec_32way)
12138
12139@@ -445,5 +450,6 @@ ENTRY(blowfish_ctr_32way)
12140
12141 vzeroupper;
12142
12143+ pax_force_retaddr 0, 1
12144 ret;
12145 ENDPROC(blowfish_ctr_32way)
12146diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12147index 246c670..4d1ed00 100644
12148--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
12149+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12150@@ -21,6 +21,7 @@
12151 */
12152
12153 #include <linux/linkage.h>
12154+#include <asm/alternative-asm.h>
12155
12156 .file "blowfish-x86_64-asm.S"
12157 .text
12158@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
12159 jnz .L__enc_xor;
12160
12161 write_block();
12162+ pax_force_retaddr 0, 1
12163 ret;
12164 .L__enc_xor:
12165 xor_block();
12166+ pax_force_retaddr 0, 1
12167 ret;
12168 ENDPROC(__blowfish_enc_blk)
12169
12170@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
12171
12172 movq %r11, %rbp;
12173
12174+ pax_force_retaddr 0, 1
12175 ret;
12176 ENDPROC(blowfish_dec_blk)
12177
12178@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
12179
12180 popq %rbx;
12181 popq %rbp;
12182+ pax_force_retaddr 0, 1
12183 ret;
12184
12185 .L__enc_xor4:
12186@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
12187
12188 popq %rbx;
12189 popq %rbp;
12190+ pax_force_retaddr 0, 1
12191 ret;
12192 ENDPROC(__blowfish_enc_blk_4way)
12193
12194@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
12195 popq %rbx;
12196 popq %rbp;
12197
12198+ pax_force_retaddr 0, 1
12199 ret;
12200 ENDPROC(blowfish_dec_blk_4way)
12201diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12202index ce71f92..2dd5b1e 100644
12203--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12204+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12205@@ -16,6 +16,7 @@
12206 */
12207
12208 #include <linux/linkage.h>
12209+#include <asm/alternative-asm.h>
12210
12211 #define CAMELLIA_TABLE_BYTE_LEN 272
12212
12213@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12214 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
12215 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
12216 %rcx, (%r9));
12217+ pax_force_retaddr_bts
12218 ret;
12219 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12220
12221@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12222 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
12223 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
12224 %rax, (%r9));
12225+ pax_force_retaddr_bts
12226 ret;
12227 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12228
12229@@ -780,6 +783,7 @@ __camellia_enc_blk16:
12230 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12231 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
12232
12233+ pax_force_retaddr_bts
12234 ret;
12235
12236 .align 8
12237@@ -865,6 +869,7 @@ __camellia_dec_blk16:
12238 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12239 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
12240
12241+ pax_force_retaddr_bts
12242 ret;
12243
12244 .align 8
12245@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
12246 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12247 %xmm8, %rsi);
12248
12249+ pax_force_retaddr 0, 1
12250 ret;
12251 ENDPROC(camellia_ecb_enc_16way)
12252
12253@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
12254 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12255 %xmm8, %rsi);
12256
12257+ pax_force_retaddr 0, 1
12258 ret;
12259 ENDPROC(camellia_ecb_dec_16way)
12260
12261@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
12262 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12263 %xmm8, %rsi);
12264
12265+ pax_force_retaddr 0, 1
12266 ret;
12267 ENDPROC(camellia_cbc_dec_16way)
12268
12269@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
12270 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12271 %xmm8, %rsi);
12272
12273+ pax_force_retaddr 0, 1
12274 ret;
12275 ENDPROC(camellia_ctr_16way)
12276
12277@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
12278 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12279 %xmm8, %rsi);
12280
12281+ pax_force_retaddr 0, 1
12282 ret;
12283 ENDPROC(camellia_xts_crypt_16way)
12284
12285diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12286index 91a1878..bcf340a 100644
12287--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12288+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12289@@ -11,6 +11,7 @@
12290 */
12291
12292 #include <linux/linkage.h>
12293+#include <asm/alternative-asm.h>
12294
12295 #define CAMELLIA_TABLE_BYTE_LEN 272
12296
12297@@ -212,6 +213,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12298 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
12299 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
12300 %rcx, (%r9));
12301+ pax_force_retaddr_bts
12302 ret;
12303 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12304
12305@@ -220,6 +222,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12306 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
12307 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
12308 %rax, (%r9));
12309+ pax_force_retaddr_bts
12310 ret;
12311 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12312
12313@@ -802,6 +805,7 @@ __camellia_enc_blk32:
12314 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12315 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
12316
12317+ pax_force_retaddr_bts
12318 ret;
12319
12320 .align 8
12321@@ -887,6 +891,7 @@ __camellia_dec_blk32:
12322 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12323 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
12324
12325+ pax_force_retaddr_bts
12326 ret;
12327
12328 .align 8
12329@@ -930,6 +935,7 @@ ENTRY(camellia_ecb_enc_32way)
12330
12331 vzeroupper;
12332
12333+ pax_force_retaddr 0, 1
12334 ret;
12335 ENDPROC(camellia_ecb_enc_32way)
12336
12337@@ -962,6 +968,7 @@ ENTRY(camellia_ecb_dec_32way)
12338
12339 vzeroupper;
12340
12341+ pax_force_retaddr 0, 1
12342 ret;
12343 ENDPROC(camellia_ecb_dec_32way)
12344
12345@@ -1028,6 +1035,7 @@ ENTRY(camellia_cbc_dec_32way)
12346
12347 vzeroupper;
12348
12349+ pax_force_retaddr 0, 1
12350 ret;
12351 ENDPROC(camellia_cbc_dec_32way)
12352
12353@@ -1166,6 +1174,7 @@ ENTRY(camellia_ctr_32way)
12354
12355 vzeroupper;
12356
12357+ pax_force_retaddr 0, 1
12358 ret;
12359 ENDPROC(camellia_ctr_32way)
12360
12361@@ -1331,6 +1340,7 @@ camellia_xts_crypt_32way:
12362
12363 vzeroupper;
12364
12365+ pax_force_retaddr 0, 1
12366 ret;
12367 ENDPROC(camellia_xts_crypt_32way)
12368
12369diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
12370index 310319c..ce174a4 100644
12371--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
12372+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
12373@@ -21,6 +21,7 @@
12374 */
12375
12376 #include <linux/linkage.h>
12377+#include <asm/alternative-asm.h>
12378
12379 .file "camellia-x86_64-asm_64.S"
12380 .text
12381@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
12382 enc_outunpack(mov, RT1);
12383
12384 movq RRBP, %rbp;
12385+ pax_force_retaddr 0, 1
12386 ret;
12387
12388 .L__enc_xor:
12389 enc_outunpack(xor, RT1);
12390
12391 movq RRBP, %rbp;
12392+ pax_force_retaddr 0, 1
12393 ret;
12394 ENDPROC(__camellia_enc_blk)
12395
12396@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
12397 dec_outunpack();
12398
12399 movq RRBP, %rbp;
12400+ pax_force_retaddr 0, 1
12401 ret;
12402 ENDPROC(camellia_dec_blk)
12403
12404@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
12405
12406 movq RRBP, %rbp;
12407 popq %rbx;
12408+ pax_force_retaddr 0, 1
12409 ret;
12410
12411 .L__enc2_xor:
12412@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
12413
12414 movq RRBP, %rbp;
12415 popq %rbx;
12416+ pax_force_retaddr 0, 1
12417 ret;
12418 ENDPROC(__camellia_enc_blk_2way)
12419
12420@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
12421
12422 movq RRBP, %rbp;
12423 movq RXOR, %rbx;
12424+ pax_force_retaddr 0, 1
12425 ret;
12426 ENDPROC(camellia_dec_blk_2way)
12427diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12428index c35fd5d..c1ee236 100644
12429--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12430+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12431@@ -24,6 +24,7 @@
12432 */
12433
12434 #include <linux/linkage.h>
12435+#include <asm/alternative-asm.h>
12436
12437 .file "cast5-avx-x86_64-asm_64.S"
12438
12439@@ -281,6 +282,7 @@ __cast5_enc_blk16:
12440 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12441 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12442
12443+ pax_force_retaddr 0, 1
12444 ret;
12445 ENDPROC(__cast5_enc_blk16)
12446
12447@@ -352,6 +354,7 @@ __cast5_dec_blk16:
12448 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12449 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12450
12451+ pax_force_retaddr 0, 1
12452 ret;
12453
12454 .L__skip_dec:
12455@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
12456 vmovdqu RR4, (6*4*4)(%r11);
12457 vmovdqu RL4, (7*4*4)(%r11);
12458
12459+ pax_force_retaddr
12460 ret;
12461 ENDPROC(cast5_ecb_enc_16way)
12462
12463@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
12464 vmovdqu RR4, (6*4*4)(%r11);
12465 vmovdqu RL4, (7*4*4)(%r11);
12466
12467+ pax_force_retaddr
12468 ret;
12469 ENDPROC(cast5_ecb_dec_16way)
12470
12471@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
12472
12473 popq %r12;
12474
12475+ pax_force_retaddr
12476 ret;
12477 ENDPROC(cast5_cbc_dec_16way)
12478
12479@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
12480
12481 popq %r12;
12482
12483+ pax_force_retaddr
12484 ret;
12485 ENDPROC(cast5_ctr_16way)
12486diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12487index e3531f8..18ded3a 100644
12488--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12489+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12490@@ -24,6 +24,7 @@
12491 */
12492
12493 #include <linux/linkage.h>
12494+#include <asm/alternative-asm.h>
12495 #include "glue_helper-asm-avx.S"
12496
12497 .file "cast6-avx-x86_64-asm_64.S"
12498@@ -295,6 +296,7 @@ __cast6_enc_blk8:
12499 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
12500 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
12501
12502+ pax_force_retaddr 0, 1
12503 ret;
12504 ENDPROC(__cast6_enc_blk8)
12505
12506@@ -340,6 +342,7 @@ __cast6_dec_blk8:
12507 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
12508 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
12509
12510+ pax_force_retaddr 0, 1
12511 ret;
12512 ENDPROC(__cast6_dec_blk8)
12513
12514@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
12515
12516 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12517
12518+ pax_force_retaddr
12519 ret;
12520 ENDPROC(cast6_ecb_enc_8way)
12521
12522@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
12523
12524 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12525
12526+ pax_force_retaddr
12527 ret;
12528 ENDPROC(cast6_ecb_dec_8way)
12529
12530@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
12531
12532 popq %r12;
12533
12534+ pax_force_retaddr
12535 ret;
12536 ENDPROC(cast6_cbc_dec_8way)
12537
12538@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
12539
12540 popq %r12;
12541
12542+ pax_force_retaddr
12543 ret;
12544 ENDPROC(cast6_ctr_8way)
12545
12546@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
12547 /* dst <= regs xor IVs(in dst) */
12548 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12549
12550+ pax_force_retaddr
12551 ret;
12552 ENDPROC(cast6_xts_enc_8way)
12553
12554@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
12555 /* dst <= regs xor IVs(in dst) */
12556 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12557
12558+ pax_force_retaddr
12559 ret;
12560 ENDPROC(cast6_xts_dec_8way)
12561diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12562index dbc4339..3d868c5 100644
12563--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12564+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12565@@ -45,6 +45,7 @@
12566
12567 #include <asm/inst.h>
12568 #include <linux/linkage.h>
12569+#include <asm/alternative-asm.h>
12570
12571 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
12572
12573@@ -312,6 +313,7 @@ do_return:
12574 popq %rsi
12575 popq %rdi
12576 popq %rbx
12577+ pax_force_retaddr 0, 1
12578 ret
12579
12580 ################################################################
12581diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
12582index 586f41a..d02851e 100644
12583--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
12584+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
12585@@ -18,6 +18,7 @@
12586
12587 #include <linux/linkage.h>
12588 #include <asm/inst.h>
12589+#include <asm/alternative-asm.h>
12590
12591 .data
12592
12593@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
12594 psrlq $1, T2
12595 pxor T2, T1
12596 pxor T1, DATA
12597+ pax_force_retaddr
12598 ret
12599 ENDPROC(__clmul_gf128mul_ble)
12600
12601@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
12602 call __clmul_gf128mul_ble
12603 PSHUFB_XMM BSWAP DATA
12604 movups DATA, (%rdi)
12605+ pax_force_retaddr
12606 ret
12607 ENDPROC(clmul_ghash_mul)
12608
12609@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
12610 PSHUFB_XMM BSWAP DATA
12611 movups DATA, (%rdi)
12612 .Lupdate_just_ret:
12613+ pax_force_retaddr
12614 ret
12615 ENDPROC(clmul_ghash_update)
12616
12617@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
12618 pand .Lpoly, %xmm1
12619 pxor %xmm1, %xmm0
12620 movups %xmm0, (%rdi)
12621+ pax_force_retaddr
12622 ret
12623 ENDPROC(clmul_ghash_setkey)
12624diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
12625index 9279e0b..9270820 100644
12626--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
12627+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
12628@@ -1,4 +1,5 @@
12629 #include <linux/linkage.h>
12630+#include <asm/alternative-asm.h>
12631
12632 # enter salsa20_encrypt_bytes
12633 ENTRY(salsa20_encrypt_bytes)
12634@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
12635 add %r11,%rsp
12636 mov %rdi,%rax
12637 mov %rsi,%rdx
12638+ pax_force_retaddr 0, 1
12639 ret
12640 # bytesatleast65:
12641 ._bytesatleast65:
12642@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
12643 add %r11,%rsp
12644 mov %rdi,%rax
12645 mov %rsi,%rdx
12646+ pax_force_retaddr
12647 ret
12648 ENDPROC(salsa20_keysetup)
12649
12650@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
12651 add %r11,%rsp
12652 mov %rdi,%rax
12653 mov %rsi,%rdx
12654+ pax_force_retaddr
12655 ret
12656 ENDPROC(salsa20_ivsetup)
12657diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12658index 2f202f4..d9164d6 100644
12659--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12660+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12661@@ -24,6 +24,7 @@
12662 */
12663
12664 #include <linux/linkage.h>
12665+#include <asm/alternative-asm.h>
12666 #include "glue_helper-asm-avx.S"
12667
12668 .file "serpent-avx-x86_64-asm_64.S"
12669@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
12670 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12671 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12672
12673+ pax_force_retaddr
12674 ret;
12675 ENDPROC(__serpent_enc_blk8_avx)
12676
12677@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
12678 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12679 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12680
12681+ pax_force_retaddr
12682 ret;
12683 ENDPROC(__serpent_dec_blk8_avx)
12684
12685@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
12686
12687 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12688
12689+ pax_force_retaddr
12690 ret;
12691 ENDPROC(serpent_ecb_enc_8way_avx)
12692
12693@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
12694
12695 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12696
12697+ pax_force_retaddr
12698 ret;
12699 ENDPROC(serpent_ecb_dec_8way_avx)
12700
12701@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
12702
12703 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12704
12705+ pax_force_retaddr
12706 ret;
12707 ENDPROC(serpent_cbc_dec_8way_avx)
12708
12709@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
12710
12711 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12712
12713+ pax_force_retaddr
12714 ret;
12715 ENDPROC(serpent_ctr_8way_avx)
12716
12717@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
12718 /* dst <= regs xor IVs(in dst) */
12719 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12720
12721+ pax_force_retaddr
12722 ret;
12723 ENDPROC(serpent_xts_enc_8way_avx)
12724
12725@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
12726 /* dst <= regs xor IVs(in dst) */
12727 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12728
12729+ pax_force_retaddr
12730 ret;
12731 ENDPROC(serpent_xts_dec_8way_avx)
12732diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
12733index b222085..abd483c 100644
12734--- a/arch/x86/crypto/serpent-avx2-asm_64.S
12735+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
12736@@ -15,6 +15,7 @@
12737 */
12738
12739 #include <linux/linkage.h>
12740+#include <asm/alternative-asm.h>
12741 #include "glue_helper-asm-avx2.S"
12742
12743 .file "serpent-avx2-asm_64.S"
12744@@ -610,6 +611,7 @@ __serpent_enc_blk16:
12745 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12746 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12747
12748+ pax_force_retaddr
12749 ret;
12750 ENDPROC(__serpent_enc_blk16)
12751
12752@@ -664,6 +666,7 @@ __serpent_dec_blk16:
12753 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12754 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12755
12756+ pax_force_retaddr
12757 ret;
12758 ENDPROC(__serpent_dec_blk16)
12759
12760@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
12761
12762 vzeroupper;
12763
12764+ pax_force_retaddr
12765 ret;
12766 ENDPROC(serpent_ecb_enc_16way)
12767
12768@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
12769
12770 vzeroupper;
12771
12772+ pax_force_retaddr
12773 ret;
12774 ENDPROC(serpent_ecb_dec_16way)
12775
12776@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
12777
12778 vzeroupper;
12779
12780+ pax_force_retaddr
12781 ret;
12782 ENDPROC(serpent_cbc_dec_16way)
12783
12784@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
12785
12786 vzeroupper;
12787
12788+ pax_force_retaddr
12789 ret;
12790 ENDPROC(serpent_ctr_16way)
12791
12792@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
12793
12794 vzeroupper;
12795
12796+ pax_force_retaddr
12797 ret;
12798 ENDPROC(serpent_xts_enc_16way)
12799
12800@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
12801
12802 vzeroupper;
12803
12804+ pax_force_retaddr
12805 ret;
12806 ENDPROC(serpent_xts_dec_16way)
12807diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12808index acc066c..1559cc4 100644
12809--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12810+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12811@@ -25,6 +25,7 @@
12812 */
12813
12814 #include <linux/linkage.h>
12815+#include <asm/alternative-asm.h>
12816
12817 .file "serpent-sse2-x86_64-asm_64.S"
12818 .text
12819@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
12820 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12821 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12822
12823+ pax_force_retaddr
12824 ret;
12825
12826 .L__enc_xor8:
12827 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12828 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12829
12830+ pax_force_retaddr
12831 ret;
12832 ENDPROC(__serpent_enc_blk_8way)
12833
12834@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
12835 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12836 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12837
12838+ pax_force_retaddr
12839 ret;
12840 ENDPROC(serpent_dec_blk_8way)
12841diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
12842index a410950..3356d42 100644
12843--- a/arch/x86/crypto/sha1_ssse3_asm.S
12844+++ b/arch/x86/crypto/sha1_ssse3_asm.S
12845@@ -29,6 +29,7 @@
12846 */
12847
12848 #include <linux/linkage.h>
12849+#include <asm/alternative-asm.h>
12850
12851 #define CTX %rdi // arg1
12852 #define BUF %rsi // arg2
12853@@ -104,6 +105,7 @@
12854 pop %r12
12855 pop %rbp
12856 pop %rbx
12857+ pax_force_retaddr 0, 1
12858 ret
12859
12860 ENDPROC(\name)
12861diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
12862index 642f156..4ab07b9 100644
12863--- a/arch/x86/crypto/sha256-avx-asm.S
12864+++ b/arch/x86/crypto/sha256-avx-asm.S
12865@@ -49,6 +49,7 @@
12866
12867 #ifdef CONFIG_AS_AVX
12868 #include <linux/linkage.h>
12869+#include <asm/alternative-asm.h>
12870
12871 ## assume buffers not aligned
12872 #define VMOVDQ vmovdqu
12873@@ -460,6 +461,7 @@ done_hash:
12874 popq %r13
12875 popq %rbp
12876 popq %rbx
12877+ pax_force_retaddr 0, 1
12878 ret
12879 ENDPROC(sha256_transform_avx)
12880
12881diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
12882index 9e86944..2e7f95a 100644
12883--- a/arch/x86/crypto/sha256-avx2-asm.S
12884+++ b/arch/x86/crypto/sha256-avx2-asm.S
12885@@ -50,6 +50,7 @@
12886
12887 #ifdef CONFIG_AS_AVX2
12888 #include <linux/linkage.h>
12889+#include <asm/alternative-asm.h>
12890
12891 ## assume buffers not aligned
12892 #define VMOVDQ vmovdqu
12893@@ -720,6 +721,7 @@ done_hash:
12894 popq %r12
12895 popq %rbp
12896 popq %rbx
12897+ pax_force_retaddr 0, 1
12898 ret
12899 ENDPROC(sha256_transform_rorx)
12900
12901diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
12902index f833b74..c36ed14 100644
12903--- a/arch/x86/crypto/sha256-ssse3-asm.S
12904+++ b/arch/x86/crypto/sha256-ssse3-asm.S
12905@@ -47,6 +47,7 @@
12906 ########################################################################
12907
12908 #include <linux/linkage.h>
12909+#include <asm/alternative-asm.h>
12910
12911 ## assume buffers not aligned
12912 #define MOVDQ movdqu
12913@@ -471,6 +472,7 @@ done_hash:
12914 popq %rbp
12915 popq %rbx
12916
12917+ pax_force_retaddr 0, 1
12918 ret
12919 ENDPROC(sha256_transform_ssse3)
12920
12921diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
12922index 974dde9..4533d34 100644
12923--- a/arch/x86/crypto/sha512-avx-asm.S
12924+++ b/arch/x86/crypto/sha512-avx-asm.S
12925@@ -49,6 +49,7 @@
12926
12927 #ifdef CONFIG_AS_AVX
12928 #include <linux/linkage.h>
12929+#include <asm/alternative-asm.h>
12930
12931 .text
12932
12933@@ -364,6 +365,7 @@ updateblock:
12934 mov frame_RSPSAVE(%rsp), %rsp
12935
12936 nowork:
12937+ pax_force_retaddr 0, 1
12938 ret
12939 ENDPROC(sha512_transform_avx)
12940
12941diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
12942index 568b961..061ef1d 100644
12943--- a/arch/x86/crypto/sha512-avx2-asm.S
12944+++ b/arch/x86/crypto/sha512-avx2-asm.S
12945@@ -51,6 +51,7 @@
12946
12947 #ifdef CONFIG_AS_AVX2
12948 #include <linux/linkage.h>
12949+#include <asm/alternative-asm.h>
12950
12951 .text
12952
12953@@ -678,6 +679,7 @@ done_hash:
12954
12955 # Restore Stack Pointer
12956 mov frame_RSPSAVE(%rsp), %rsp
12957+ pax_force_retaddr 0, 1
12958 ret
12959 ENDPROC(sha512_transform_rorx)
12960
12961diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
12962index fb56855..e23914f 100644
12963--- a/arch/x86/crypto/sha512-ssse3-asm.S
12964+++ b/arch/x86/crypto/sha512-ssse3-asm.S
12965@@ -48,6 +48,7 @@
12966 ########################################################################
12967
12968 #include <linux/linkage.h>
12969+#include <asm/alternative-asm.h>
12970
12971 .text
12972
12973@@ -363,6 +364,7 @@ updateblock:
12974 mov frame_RSPSAVE(%rsp), %rsp
12975
12976 nowork:
12977+ pax_force_retaddr 0, 1
12978 ret
12979 ENDPROC(sha512_transform_ssse3)
12980
12981diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12982index 0505813..63b1d00 100644
12983--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12984+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12985@@ -24,6 +24,7 @@
12986 */
12987
12988 #include <linux/linkage.h>
12989+#include <asm/alternative-asm.h>
12990 #include "glue_helper-asm-avx.S"
12991
12992 .file "twofish-avx-x86_64-asm_64.S"
12993@@ -284,6 +285,7 @@ __twofish_enc_blk8:
12994 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
12995 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
12996
12997+ pax_force_retaddr 0, 1
12998 ret;
12999 ENDPROC(__twofish_enc_blk8)
13000
13001@@ -324,6 +326,7 @@ __twofish_dec_blk8:
13002 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
13003 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
13004
13005+ pax_force_retaddr 0, 1
13006 ret;
13007 ENDPROC(__twofish_dec_blk8)
13008
13009@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
13010
13011 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13012
13013+ pax_force_retaddr 0, 1
13014 ret;
13015 ENDPROC(twofish_ecb_enc_8way)
13016
13017@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
13018
13019 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13020
13021+ pax_force_retaddr 0, 1
13022 ret;
13023 ENDPROC(twofish_ecb_dec_8way)
13024
13025@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
13026
13027 popq %r12;
13028
13029+ pax_force_retaddr 0, 1
13030 ret;
13031 ENDPROC(twofish_cbc_dec_8way)
13032
13033@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
13034
13035 popq %r12;
13036
13037+ pax_force_retaddr 0, 1
13038 ret;
13039 ENDPROC(twofish_ctr_8way)
13040
13041@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
13042 /* dst <= regs xor IVs(in dst) */
13043 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13044
13045+ pax_force_retaddr 0, 1
13046 ret;
13047 ENDPROC(twofish_xts_enc_8way)
13048
13049@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
13050 /* dst <= regs xor IVs(in dst) */
13051 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13052
13053+ pax_force_retaddr 0, 1
13054 ret;
13055 ENDPROC(twofish_xts_dec_8way)
13056diff --git a/arch/x86/crypto/twofish-avx2-asm_64.S b/arch/x86/crypto/twofish-avx2-asm_64.S
13057index e1a83b9..33006b9 100644
13058--- a/arch/x86/crypto/twofish-avx2-asm_64.S
13059+++ b/arch/x86/crypto/twofish-avx2-asm_64.S
13060@@ -11,6 +11,7 @@
13061 */
13062
13063 #include <linux/linkage.h>
13064+#include <asm/alternative-asm.h>
13065 #include "glue_helper-asm-avx2.S"
13066
13067 .file "twofish-avx2-asm_64.S"
13068@@ -422,6 +423,7 @@ __twofish_enc_blk16:
13069 outunpack_enc16(RA, RB, RC, RD);
13070 write_blocks16(RA, RB, RC, RD);
13071
13072+ pax_force_retaddr_bts
13073 ret;
13074 ENDPROC(__twofish_enc_blk16)
13075
13076@@ -454,6 +456,7 @@ __twofish_dec_blk16:
13077 outunpack_dec16(RA, RB, RC, RD);
13078 write_blocks16(RA, RB, RC, RD);
13079
13080+ pax_force_retaddr_bts
13081 ret;
13082 ENDPROC(__twofish_dec_blk16)
13083
13084@@ -476,6 +479,7 @@ ENTRY(twofish_ecb_enc_16way)
13085 popq %r12;
13086 vzeroupper;
13087
13088+ pax_force_retaddr 0, 1
13089 ret;
13090 ENDPROC(twofish_ecb_enc_16way)
13091
13092@@ -498,6 +502,7 @@ ENTRY(twofish_ecb_dec_16way)
13093 popq %r12;
13094 vzeroupper;
13095
13096+ pax_force_retaddr 0, 1
13097 ret;
13098 ENDPROC(twofish_ecb_dec_16way)
13099
13100@@ -521,6 +526,7 @@ ENTRY(twofish_cbc_dec_16way)
13101 popq %r12;
13102 vzeroupper;
13103
13104+ pax_force_retaddr 0, 1
13105 ret;
13106 ENDPROC(twofish_cbc_dec_16way)
13107
13108@@ -546,6 +552,7 @@ ENTRY(twofish_ctr_16way)
13109 popq %r12;
13110 vzeroupper;
13111
13112+ pax_force_retaddr 0, 1
13113 ret;
13114 ENDPROC(twofish_ctr_16way)
13115
13116@@ -574,6 +581,7 @@ twofish_xts_crypt_16way:
13117 popq %r12;
13118 vzeroupper;
13119
13120+ pax_force_retaddr 0, 1
13121 ret;
13122 ENDPROC(twofish_xts_crypt_16way)
13123
13124diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13125index 1c3b7ce..b365c5e 100644
13126--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13127+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13128@@ -21,6 +21,7 @@
13129 */
13130
13131 #include <linux/linkage.h>
13132+#include <asm/alternative-asm.h>
13133
13134 .file "twofish-x86_64-asm-3way.S"
13135 .text
13136@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
13137 popq %r13;
13138 popq %r14;
13139 popq %r15;
13140+ pax_force_retaddr 0, 1
13141 ret;
13142
13143 .L__enc_xor3:
13144@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
13145 popq %r13;
13146 popq %r14;
13147 popq %r15;
13148+ pax_force_retaddr 0, 1
13149 ret;
13150 ENDPROC(__twofish_enc_blk_3way)
13151
13152@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
13153 popq %r13;
13154 popq %r14;
13155 popq %r15;
13156+ pax_force_retaddr 0, 1
13157 ret;
13158 ENDPROC(twofish_dec_blk_3way)
13159diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
13160index a039d21..29e7615 100644
13161--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
13162+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
13163@@ -22,6 +22,7 @@
13164
13165 #include <linux/linkage.h>
13166 #include <asm/asm-offsets.h>
13167+#include <asm/alternative-asm.h>
13168
13169 #define a_offset 0
13170 #define b_offset 4
13171@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
13172
13173 popq R1
13174 movq $1,%rax
13175+ pax_force_retaddr 0, 1
13176 ret
13177 ENDPROC(twofish_enc_blk)
13178
13179@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
13180
13181 popq R1
13182 movq $1,%rax
13183+ pax_force_retaddr 0, 1
13184 ret
13185 ENDPROC(twofish_dec_blk)
13186diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
13187index 52ff81c..98af645 100644
13188--- a/arch/x86/ia32/ia32_aout.c
13189+++ b/arch/x86/ia32/ia32_aout.c
13190@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
13191 unsigned long dump_start, dump_size;
13192 struct user32 dump;
13193
13194+ memset(&dump, 0, sizeof(dump));
13195+
13196 fs = get_fs();
13197 set_fs(KERNEL_DS);
13198 has_dumped = 1;
13199diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
13200index cf1a471..5ba2673 100644
13201--- a/arch/x86/ia32/ia32_signal.c
13202+++ b/arch/x86/ia32/ia32_signal.c
13203@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
13204 sp -= frame_size;
13205 /* Align the stack pointer according to the i386 ABI,
13206 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
13207- sp = ((sp + 4) & -16ul) - 4;
13208+ sp = ((sp - 12) & -16ul) - 4;
13209 return (void __user *) sp;
13210 }
13211
13212@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13213 * These are actually not used anymore, but left because some
13214 * gdb versions depend on them as a marker.
13215 */
13216- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13217+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13218 } put_user_catch(err);
13219
13220 if (err)
13221@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13222 0xb8,
13223 __NR_ia32_rt_sigreturn,
13224 0x80cd,
13225- 0,
13226+ 0
13227 };
13228
13229 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
13230@@ -459,20 +459,22 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13231 else
13232 put_user_ex(0, &frame->uc.uc_flags);
13233 put_user_ex(0, &frame->uc.uc_link);
13234- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
13235+ __compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
13236
13237 if (ksig->ka.sa.sa_flags & SA_RESTORER)
13238 restorer = ksig->ka.sa.sa_restorer;
13239+ else if (current->mm->context.vdso)
13240+ /* Return stub is in 32bit vsyscall page */
13241+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13242 else
13243- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13244- rt_sigreturn);
13245+ restorer = &frame->retcode;
13246 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
13247
13248 /*
13249 * Not actually used anymore, but left because some gdb
13250 * versions need it.
13251 */
13252- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13253+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13254 } put_user_catch(err);
13255
13256 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
13257diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
13258index 474dc1b..9297c58 100644
13259--- a/arch/x86/ia32/ia32entry.S
13260+++ b/arch/x86/ia32/ia32entry.S
13261@@ -15,8 +15,10 @@
13262 #include <asm/irqflags.h>
13263 #include <asm/asm.h>
13264 #include <asm/smap.h>
13265+#include <asm/pgtable.h>
13266 #include <linux/linkage.h>
13267 #include <linux/err.h>
13268+#include <asm/alternative-asm.h>
13269
13270 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13271 #include <linux/elf-em.h>
13272@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
13273 ENDPROC(native_irq_enable_sysexit)
13274 #endif
13275
13276+ .macro pax_enter_kernel_user
13277+ pax_set_fptr_mask
13278+#ifdef CONFIG_PAX_MEMORY_UDEREF
13279+ call pax_enter_kernel_user
13280+#endif
13281+ .endm
13282+
13283+ .macro pax_exit_kernel_user
13284+#ifdef CONFIG_PAX_MEMORY_UDEREF
13285+ call pax_exit_kernel_user
13286+#endif
13287+#ifdef CONFIG_PAX_RANDKSTACK
13288+ pushq %rax
13289+ pushq %r11
13290+ call pax_randomize_kstack
13291+ popq %r11
13292+ popq %rax
13293+#endif
13294+ .endm
13295+
13296+ .macro pax_erase_kstack
13297+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13298+ call pax_erase_kstack
13299+#endif
13300+ .endm
13301+
13302 /*
13303 * 32bit SYSENTER instruction entry.
13304 *
13305@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
13306 CFI_REGISTER rsp,rbp
13307 SWAPGS_UNSAFE_STACK
13308 movq PER_CPU_VAR(kernel_stack), %rsp
13309- addq $(KERNEL_STACK_OFFSET),%rsp
13310- /*
13311- * No need to follow this irqs on/off section: the syscall
13312- * disabled irqs, here we enable it straight after entry:
13313- */
13314- ENABLE_INTERRUPTS(CLBR_NONE)
13315 movl %ebp,%ebp /* zero extension */
13316 pushq_cfi $__USER32_DS
13317 /*CFI_REL_OFFSET ss,0*/
13318@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
13319 CFI_REL_OFFSET rsp,0
13320 pushfq_cfi
13321 /*CFI_REL_OFFSET rflags,0*/
13322- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
13323- CFI_REGISTER rip,r10
13324+ orl $X86_EFLAGS_IF,(%rsp)
13325+ GET_THREAD_INFO(%r11)
13326+ movl TI_sysenter_return(%r11), %r11d
13327+ CFI_REGISTER rip,r11
13328 pushq_cfi $__USER32_CS
13329 /*CFI_REL_OFFSET cs,0*/
13330 movl %eax, %eax
13331- pushq_cfi %r10
13332+ pushq_cfi %r11
13333 CFI_REL_OFFSET rip,0
13334 pushq_cfi %rax
13335 cld
13336 SAVE_ARGS 0,1,0
13337+ pax_enter_kernel_user
13338+
13339+#ifdef CONFIG_PAX_RANDKSTACK
13340+ pax_erase_kstack
13341+#endif
13342+
13343+ /*
13344+ * No need to follow this irqs on/off section: the syscall
13345+ * disabled irqs, here we enable it straight after entry:
13346+ */
13347+ ENABLE_INTERRUPTS(CLBR_NONE)
13348 /* no need to do an access_ok check here because rbp has been
13349 32bit zero extended */
13350+
13351+#ifdef CONFIG_PAX_MEMORY_UDEREF
13352+ addq pax_user_shadow_base,%rbp
13353+ ASM_PAX_OPEN_USERLAND
13354+#endif
13355+
13356 ASM_STAC
13357 1: movl (%rbp),%ebp
13358 _ASM_EXTABLE(1b,ia32_badarg)
13359 ASM_CLAC
13360- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13361- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13362+
13363+#ifdef CONFIG_PAX_MEMORY_UDEREF
13364+ ASM_PAX_CLOSE_USERLAND
13365+#endif
13366+
13367+ GET_THREAD_INFO(%r11)
13368+ orl $TS_COMPAT,TI_status(%r11)
13369+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13370 CFI_REMEMBER_STATE
13371 jnz sysenter_tracesys
13372 cmpq $(IA32_NR_syscalls-1),%rax
13373@@ -162,12 +209,15 @@ sysenter_do_call:
13374 sysenter_dispatch:
13375 call *ia32_sys_call_table(,%rax,8)
13376 movq %rax,RAX-ARGOFFSET(%rsp)
13377+ GET_THREAD_INFO(%r11)
13378 DISABLE_INTERRUPTS(CLBR_NONE)
13379 TRACE_IRQS_OFF
13380- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13381+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13382 jnz sysexit_audit
13383 sysexit_from_sys_call:
13384- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13385+ pax_exit_kernel_user
13386+ pax_erase_kstack
13387+ andl $~TS_COMPAT,TI_status(%r11)
13388 /* clear IF, that popfq doesn't enable interrupts early */
13389 andl $~0x200,EFLAGS-R11(%rsp)
13390 movl RIP-R11(%rsp),%edx /* User %eip */
13391@@ -193,6 +243,9 @@ sysexit_from_sys_call:
13392 movl %eax,%esi /* 2nd arg: syscall number */
13393 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
13394 call __audit_syscall_entry
13395+
13396+ pax_erase_kstack
13397+
13398 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
13399 cmpq $(IA32_NR_syscalls-1),%rax
13400 ja ia32_badsys
13401@@ -204,7 +257,7 @@ sysexit_from_sys_call:
13402 .endm
13403
13404 .macro auditsys_exit exit
13405- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13406+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13407 jnz ia32_ret_from_sys_call
13408 TRACE_IRQS_ON
13409 ENABLE_INTERRUPTS(CLBR_NONE)
13410@@ -215,11 +268,12 @@ sysexit_from_sys_call:
13411 1: setbe %al /* 1 if error, 0 if not */
13412 movzbl %al,%edi /* zero-extend that into %edi */
13413 call __audit_syscall_exit
13414+ GET_THREAD_INFO(%r11)
13415 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
13416 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
13417 DISABLE_INTERRUPTS(CLBR_NONE)
13418 TRACE_IRQS_OFF
13419- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13420+ testl %edi,TI_flags(%r11)
13421 jz \exit
13422 CLEAR_RREGS -ARGOFFSET
13423 jmp int_with_check
13424@@ -237,7 +291,7 @@ sysexit_audit:
13425
13426 sysenter_tracesys:
13427 #ifdef CONFIG_AUDITSYSCALL
13428- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13429+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13430 jz sysenter_auditsys
13431 #endif
13432 SAVE_REST
13433@@ -249,6 +303,9 @@ sysenter_tracesys:
13434 RESTORE_REST
13435 cmpq $(IA32_NR_syscalls-1),%rax
13436 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
13437+
13438+ pax_erase_kstack
13439+
13440 jmp sysenter_do_call
13441 CFI_ENDPROC
13442 ENDPROC(ia32_sysenter_target)
13443@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
13444 ENTRY(ia32_cstar_target)
13445 CFI_STARTPROC32 simple
13446 CFI_SIGNAL_FRAME
13447- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13448+ CFI_DEF_CFA rsp,0
13449 CFI_REGISTER rip,rcx
13450 /*CFI_REGISTER rflags,r11*/
13451 SWAPGS_UNSAFE_STACK
13452 movl %esp,%r8d
13453 CFI_REGISTER rsp,r8
13454 movq PER_CPU_VAR(kernel_stack),%rsp
13455+ SAVE_ARGS 8*6,0,0
13456+ pax_enter_kernel_user
13457+
13458+#ifdef CONFIG_PAX_RANDKSTACK
13459+ pax_erase_kstack
13460+#endif
13461+
13462 /*
13463 * No need to follow this irqs on/off section: the syscall
13464 * disabled irqs and here we enable it straight after entry:
13465 */
13466 ENABLE_INTERRUPTS(CLBR_NONE)
13467- SAVE_ARGS 8,0,0
13468 movl %eax,%eax /* zero extension */
13469 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13470 movq %rcx,RIP-ARGOFFSET(%rsp)
13471@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
13472 /* no need to do an access_ok check here because r8 has been
13473 32bit zero extended */
13474 /* hardware stack frame is complete now */
13475+
13476+#ifdef CONFIG_PAX_MEMORY_UDEREF
13477+ ASM_PAX_OPEN_USERLAND
13478+ movq pax_user_shadow_base,%r8
13479+ addq RSP-ARGOFFSET(%rsp),%r8
13480+#endif
13481+
13482 ASM_STAC
13483 1: movl (%r8),%r9d
13484 _ASM_EXTABLE(1b,ia32_badarg)
13485 ASM_CLAC
13486- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13487- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13488+
13489+#ifdef CONFIG_PAX_MEMORY_UDEREF
13490+ ASM_PAX_CLOSE_USERLAND
13491+#endif
13492+
13493+ GET_THREAD_INFO(%r11)
13494+ orl $TS_COMPAT,TI_status(%r11)
13495+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13496 CFI_REMEMBER_STATE
13497 jnz cstar_tracesys
13498 cmpq $IA32_NR_syscalls-1,%rax
13499@@ -319,12 +395,15 @@ cstar_do_call:
13500 cstar_dispatch:
13501 call *ia32_sys_call_table(,%rax,8)
13502 movq %rax,RAX-ARGOFFSET(%rsp)
13503+ GET_THREAD_INFO(%r11)
13504 DISABLE_INTERRUPTS(CLBR_NONE)
13505 TRACE_IRQS_OFF
13506- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13507+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13508 jnz sysretl_audit
13509 sysretl_from_sys_call:
13510- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13511+ pax_exit_kernel_user
13512+ pax_erase_kstack
13513+ andl $~TS_COMPAT,TI_status(%r11)
13514 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
13515 movl RIP-ARGOFFSET(%rsp),%ecx
13516 CFI_REGISTER rip,rcx
13517@@ -352,7 +431,7 @@ sysretl_audit:
13518
13519 cstar_tracesys:
13520 #ifdef CONFIG_AUDITSYSCALL
13521- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13522+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13523 jz cstar_auditsys
13524 #endif
13525 xchgl %r9d,%ebp
13526@@ -366,11 +445,19 @@ cstar_tracesys:
13527 xchgl %ebp,%r9d
13528 cmpq $(IA32_NR_syscalls-1),%rax
13529 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
13530+
13531+ pax_erase_kstack
13532+
13533 jmp cstar_do_call
13534 END(ia32_cstar_target)
13535
13536 ia32_badarg:
13537 ASM_CLAC
13538+
13539+#ifdef CONFIG_PAX_MEMORY_UDEREF
13540+ ASM_PAX_CLOSE_USERLAND
13541+#endif
13542+
13543 movq $-EFAULT,%rax
13544 jmp ia32_sysret
13545 CFI_ENDPROC
13546@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
13547 CFI_REL_OFFSET rip,RIP-RIP
13548 PARAVIRT_ADJUST_EXCEPTION_FRAME
13549 SWAPGS
13550- /*
13551- * No need to follow this irqs on/off section: the syscall
13552- * disabled irqs and here we enable it straight after entry:
13553- */
13554- ENABLE_INTERRUPTS(CLBR_NONE)
13555 movl %eax,%eax
13556 pushq_cfi %rax
13557 cld
13558 /* note the registers are not zero extended to the sf.
13559 this could be a problem. */
13560 SAVE_ARGS 0,1,0
13561- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13562- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13563+ pax_enter_kernel_user
13564+
13565+#ifdef CONFIG_PAX_RANDKSTACK
13566+ pax_erase_kstack
13567+#endif
13568+
13569+ /*
13570+ * No need to follow this irqs on/off section: the syscall
13571+ * disabled irqs and here we enable it straight after entry:
13572+ */
13573+ ENABLE_INTERRUPTS(CLBR_NONE)
13574+ GET_THREAD_INFO(%r11)
13575+ orl $TS_COMPAT,TI_status(%r11)
13576+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13577 jnz ia32_tracesys
13578 cmpq $(IA32_NR_syscalls-1),%rax
13579 ja ia32_badsys
13580@@ -442,6 +536,9 @@ ia32_tracesys:
13581 RESTORE_REST
13582 cmpq $(IA32_NR_syscalls-1),%rax
13583 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
13584+
13585+ pax_erase_kstack
13586+
13587 jmp ia32_do_call
13588 END(ia32_syscall)
13589
13590diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
13591index 8e0ceec..af13504 100644
13592--- a/arch/x86/ia32/sys_ia32.c
13593+++ b/arch/x86/ia32/sys_ia32.c
13594@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
13595 */
13596 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
13597 {
13598- typeof(ubuf->st_uid) uid = 0;
13599- typeof(ubuf->st_gid) gid = 0;
13600+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
13601+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
13602 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
13603 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
13604 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
13605diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
13606index 372231c..a5aa1a1 100644
13607--- a/arch/x86/include/asm/alternative-asm.h
13608+++ b/arch/x86/include/asm/alternative-asm.h
13609@@ -18,6 +18,45 @@
13610 .endm
13611 #endif
13612
13613+#ifdef KERNEXEC_PLUGIN
13614+ .macro pax_force_retaddr_bts rip=0
13615+ btsq $63,\rip(%rsp)
13616+ .endm
13617+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
13618+ .macro pax_force_retaddr rip=0, reload=0
13619+ btsq $63,\rip(%rsp)
13620+ .endm
13621+ .macro pax_force_fptr ptr
13622+ btsq $63,\ptr
13623+ .endm
13624+ .macro pax_set_fptr_mask
13625+ .endm
13626+#endif
13627+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
13628+ .macro pax_force_retaddr rip=0, reload=0
13629+ .if \reload
13630+ pax_set_fptr_mask
13631+ .endif
13632+ orq %r10,\rip(%rsp)
13633+ .endm
13634+ .macro pax_force_fptr ptr
13635+ orq %r10,\ptr
13636+ .endm
13637+ .macro pax_set_fptr_mask
13638+ movabs $0x8000000000000000,%r10
13639+ .endm
13640+#endif
13641+#else
13642+ .macro pax_force_retaddr rip=0, reload=0
13643+ .endm
13644+ .macro pax_force_fptr ptr
13645+ .endm
13646+ .macro pax_force_retaddr_bts rip=0
13647+ .endm
13648+ .macro pax_set_fptr_mask
13649+ .endm
13650+#endif
13651+
13652 .macro altinstruction_entry orig alt feature orig_len alt_len
13653 .long \orig - .
13654 .long \alt - .
13655diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
13656index 58ed6d9..f1cbe58 100644
13657--- a/arch/x86/include/asm/alternative.h
13658+++ b/arch/x86/include/asm/alternative.h
13659@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
13660 ".pushsection .discard,\"aw\",@progbits\n" \
13661 DISCARD_ENTRY(1) \
13662 ".popsection\n" \
13663- ".pushsection .altinstr_replacement, \"ax\"\n" \
13664+ ".pushsection .altinstr_replacement, \"a\"\n" \
13665 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
13666 ".popsection"
13667
13668@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
13669 DISCARD_ENTRY(1) \
13670 DISCARD_ENTRY(2) \
13671 ".popsection\n" \
13672- ".pushsection .altinstr_replacement, \"ax\"\n" \
13673+ ".pushsection .altinstr_replacement, \"a\"\n" \
13674 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
13675 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
13676 ".popsection"
13677diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
13678index 3388034..050f0b9 100644
13679--- a/arch/x86/include/asm/apic.h
13680+++ b/arch/x86/include/asm/apic.h
13681@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
13682
13683 #ifdef CONFIG_X86_LOCAL_APIC
13684
13685-extern unsigned int apic_verbosity;
13686+extern int apic_verbosity;
13687 extern int local_apic_timer_c2_ok;
13688
13689 extern int disable_apic;
13690diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
13691index 20370c6..a2eb9b0 100644
13692--- a/arch/x86/include/asm/apm.h
13693+++ b/arch/x86/include/asm/apm.h
13694@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
13695 __asm__ __volatile__(APM_DO_ZERO_SEGS
13696 "pushl %%edi\n\t"
13697 "pushl %%ebp\n\t"
13698- "lcall *%%cs:apm_bios_entry\n\t"
13699+ "lcall *%%ss:apm_bios_entry\n\t"
13700 "setc %%al\n\t"
13701 "popl %%ebp\n\t"
13702 "popl %%edi\n\t"
13703@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
13704 __asm__ __volatile__(APM_DO_ZERO_SEGS
13705 "pushl %%edi\n\t"
13706 "pushl %%ebp\n\t"
13707- "lcall *%%cs:apm_bios_entry\n\t"
13708+ "lcall *%%ss:apm_bios_entry\n\t"
13709 "setc %%bl\n\t"
13710 "popl %%ebp\n\t"
13711 "popl %%edi\n\t"
13712diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
13713index 722aa3b..3a0bb27 100644
13714--- a/arch/x86/include/asm/atomic.h
13715+++ b/arch/x86/include/asm/atomic.h
13716@@ -22,7 +22,18 @@
13717 */
13718 static inline int atomic_read(const atomic_t *v)
13719 {
13720- return (*(volatile int *)&(v)->counter);
13721+ return (*(volatile const int *)&(v)->counter);
13722+}
13723+
13724+/**
13725+ * atomic_read_unchecked - read atomic variable
13726+ * @v: pointer of type atomic_unchecked_t
13727+ *
13728+ * Atomically reads the value of @v.
13729+ */
13730+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
13731+{
13732+ return (*(volatile const int *)&(v)->counter);
13733 }
13734
13735 /**
13736@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
13737 }
13738
13739 /**
13740+ * atomic_set_unchecked - set atomic variable
13741+ * @v: pointer of type atomic_unchecked_t
13742+ * @i: required value
13743+ *
13744+ * Atomically sets the value of @v to @i.
13745+ */
13746+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
13747+{
13748+ v->counter = i;
13749+}
13750+
13751+/**
13752 * atomic_add - add integer to atomic variable
13753 * @i: integer value to add
13754 * @v: pointer of type atomic_t
13755@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
13756 */
13757 static inline void atomic_add(int i, atomic_t *v)
13758 {
13759- asm volatile(LOCK_PREFIX "addl %1,%0"
13760+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13761+
13762+#ifdef CONFIG_PAX_REFCOUNT
13763+ "jno 0f\n"
13764+ LOCK_PREFIX "subl %1,%0\n"
13765+ "int $4\n0:\n"
13766+ _ASM_EXTABLE(0b, 0b)
13767+#endif
13768+
13769+ : "+m" (v->counter)
13770+ : "ir" (i));
13771+}
13772+
13773+/**
13774+ * atomic_add_unchecked - add integer to atomic variable
13775+ * @i: integer value to add
13776+ * @v: pointer of type atomic_unchecked_t
13777+ *
13778+ * Atomically adds @i to @v.
13779+ */
13780+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
13781+{
13782+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13783 : "+m" (v->counter)
13784 : "ir" (i));
13785 }
13786@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
13787 */
13788 static inline void atomic_sub(int i, atomic_t *v)
13789 {
13790- asm volatile(LOCK_PREFIX "subl %1,%0"
13791+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13792+
13793+#ifdef CONFIG_PAX_REFCOUNT
13794+ "jno 0f\n"
13795+ LOCK_PREFIX "addl %1,%0\n"
13796+ "int $4\n0:\n"
13797+ _ASM_EXTABLE(0b, 0b)
13798+#endif
13799+
13800+ : "+m" (v->counter)
13801+ : "ir" (i));
13802+}
13803+
13804+/**
13805+ * atomic_sub_unchecked - subtract integer from atomic variable
13806+ * @i: integer value to subtract
13807+ * @v: pointer of type atomic_unchecked_t
13808+ *
13809+ * Atomically subtracts @i from @v.
13810+ */
13811+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
13812+{
13813+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13814 : "+m" (v->counter)
13815 : "ir" (i));
13816 }
13817@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13818 {
13819 unsigned char c;
13820
13821- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
13822+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
13823+
13824+#ifdef CONFIG_PAX_REFCOUNT
13825+ "jno 0f\n"
13826+ LOCK_PREFIX "addl %2,%0\n"
13827+ "int $4\n0:\n"
13828+ _ASM_EXTABLE(0b, 0b)
13829+#endif
13830+
13831+ "sete %1\n"
13832 : "+m" (v->counter), "=qm" (c)
13833 : "ir" (i) : "memory");
13834 return c;
13835@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13836 */
13837 static inline void atomic_inc(atomic_t *v)
13838 {
13839- asm volatile(LOCK_PREFIX "incl %0"
13840+ asm volatile(LOCK_PREFIX "incl %0\n"
13841+
13842+#ifdef CONFIG_PAX_REFCOUNT
13843+ "jno 0f\n"
13844+ LOCK_PREFIX "decl %0\n"
13845+ "int $4\n0:\n"
13846+ _ASM_EXTABLE(0b, 0b)
13847+#endif
13848+
13849+ : "+m" (v->counter));
13850+}
13851+
13852+/**
13853+ * atomic_inc_unchecked - increment atomic variable
13854+ * @v: pointer of type atomic_unchecked_t
13855+ *
13856+ * Atomically increments @v by 1.
13857+ */
13858+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
13859+{
13860+ asm volatile(LOCK_PREFIX "incl %0\n"
13861 : "+m" (v->counter));
13862 }
13863
13864@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
13865 */
13866 static inline void atomic_dec(atomic_t *v)
13867 {
13868- asm volatile(LOCK_PREFIX "decl %0"
13869+ asm volatile(LOCK_PREFIX "decl %0\n"
13870+
13871+#ifdef CONFIG_PAX_REFCOUNT
13872+ "jno 0f\n"
13873+ LOCK_PREFIX "incl %0\n"
13874+ "int $4\n0:\n"
13875+ _ASM_EXTABLE(0b, 0b)
13876+#endif
13877+
13878+ : "+m" (v->counter));
13879+}
13880+
13881+/**
13882+ * atomic_dec_unchecked - decrement atomic variable
13883+ * @v: pointer of type atomic_unchecked_t
13884+ *
13885+ * Atomically decrements @v by 1.
13886+ */
13887+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
13888+{
13889+ asm volatile(LOCK_PREFIX "decl %0\n"
13890 : "+m" (v->counter));
13891 }
13892
13893@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
13894 {
13895 unsigned char c;
13896
13897- asm volatile(LOCK_PREFIX "decl %0; sete %1"
13898+ asm volatile(LOCK_PREFIX "decl %0\n"
13899+
13900+#ifdef CONFIG_PAX_REFCOUNT
13901+ "jno 0f\n"
13902+ LOCK_PREFIX "incl %0\n"
13903+ "int $4\n0:\n"
13904+ _ASM_EXTABLE(0b, 0b)
13905+#endif
13906+
13907+ "sete %1\n"
13908 : "+m" (v->counter), "=qm" (c)
13909 : : "memory");
13910 return c != 0;
13911@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
13912 {
13913 unsigned char c;
13914
13915- asm volatile(LOCK_PREFIX "incl %0; sete %1"
13916+ asm volatile(LOCK_PREFIX "incl %0\n"
13917+
13918+#ifdef CONFIG_PAX_REFCOUNT
13919+ "jno 0f\n"
13920+ LOCK_PREFIX "decl %0\n"
13921+ "int $4\n0:\n"
13922+ _ASM_EXTABLE(0b, 0b)
13923+#endif
13924+
13925+ "sete %1\n"
13926+ : "+m" (v->counter), "=qm" (c)
13927+ : : "memory");
13928+ return c != 0;
13929+}
13930+
13931+/**
13932+ * atomic_inc_and_test_unchecked - increment and test
13933+ * @v: pointer of type atomic_unchecked_t
13934+ *
13935+ * Atomically increments @v by 1
13936+ * and returns true if the result is zero, or false for all
13937+ * other cases.
13938+ */
13939+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
13940+{
13941+ unsigned char c;
13942+
13943+ asm volatile(LOCK_PREFIX "incl %0\n"
13944+ "sete %1\n"
13945 : "+m" (v->counter), "=qm" (c)
13946 : : "memory");
13947 return c != 0;
13948@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13949 {
13950 unsigned char c;
13951
13952- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
13953+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
13954+
13955+#ifdef CONFIG_PAX_REFCOUNT
13956+ "jno 0f\n"
13957+ LOCK_PREFIX "subl %2,%0\n"
13958+ "int $4\n0:\n"
13959+ _ASM_EXTABLE(0b, 0b)
13960+#endif
13961+
13962+ "sets %1\n"
13963 : "+m" (v->counter), "=qm" (c)
13964 : "ir" (i) : "memory");
13965 return c;
13966@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13967 */
13968 static inline int atomic_add_return(int i, atomic_t *v)
13969 {
13970+ return i + xadd_check_overflow(&v->counter, i);
13971+}
13972+
13973+/**
13974+ * atomic_add_return_unchecked - add integer and return
13975+ * @i: integer value to add
13976+ * @v: pointer of type atomic_unchecked_t
13977+ *
13978+ * Atomically adds @i to @v and returns @i + @v
13979+ */
13980+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
13981+{
13982 return i + xadd(&v->counter, i);
13983 }
13984
13985@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
13986 }
13987
13988 #define atomic_inc_return(v) (atomic_add_return(1, v))
13989+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
13990+{
13991+ return atomic_add_return_unchecked(1, v);
13992+}
13993 #define atomic_dec_return(v) (atomic_sub_return(1, v))
13994
13995 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13996@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13997 return cmpxchg(&v->counter, old, new);
13998 }
13999
14000+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
14001+{
14002+ return cmpxchg(&v->counter, old, new);
14003+}
14004+
14005 static inline int atomic_xchg(atomic_t *v, int new)
14006 {
14007 return xchg(&v->counter, new);
14008 }
14009
14010+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
14011+{
14012+ return xchg(&v->counter, new);
14013+}
14014+
14015 /**
14016 * __atomic_add_unless - add unless the number is already a given value
14017 * @v: pointer of type atomic_t
14018@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
14019 */
14020 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14021 {
14022- int c, old;
14023+ int c, old, new;
14024 c = atomic_read(v);
14025 for (;;) {
14026- if (unlikely(c == (u)))
14027+ if (unlikely(c == u))
14028 break;
14029- old = atomic_cmpxchg((v), c, c + (a));
14030+
14031+ asm volatile("addl %2,%0\n"
14032+
14033+#ifdef CONFIG_PAX_REFCOUNT
14034+ "jno 0f\n"
14035+ "subl %2,%0\n"
14036+ "int $4\n0:\n"
14037+ _ASM_EXTABLE(0b, 0b)
14038+#endif
14039+
14040+ : "=r" (new)
14041+ : "0" (c), "ir" (a));
14042+
14043+ old = atomic_cmpxchg(v, c, new);
14044 if (likely(old == c))
14045 break;
14046 c = old;
14047@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14048 }
14049
14050 /**
14051+ * atomic_inc_not_zero_hint - increment if not null
14052+ * @v: pointer of type atomic_t
14053+ * @hint: probable value of the atomic before the increment
14054+ *
14055+ * This version of atomic_inc_not_zero() gives a hint of probable
14056+ * value of the atomic. This helps processor to not read the memory
14057+ * before doing the atomic read/modify/write cycle, lowering
14058+ * number of bus transactions on some arches.
14059+ *
14060+ * Returns: 0 if increment was not done, 1 otherwise.
14061+ */
14062+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
14063+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
14064+{
14065+ int val, c = hint, new;
14066+
14067+ /* sanity test, should be removed by compiler if hint is a constant */
14068+ if (!hint)
14069+ return __atomic_add_unless(v, 1, 0);
14070+
14071+ do {
14072+ asm volatile("incl %0\n"
14073+
14074+#ifdef CONFIG_PAX_REFCOUNT
14075+ "jno 0f\n"
14076+ "decl %0\n"
14077+ "int $4\n0:\n"
14078+ _ASM_EXTABLE(0b, 0b)
14079+#endif
14080+
14081+ : "=r" (new)
14082+ : "0" (c));
14083+
14084+ val = atomic_cmpxchg(v, c, new);
14085+ if (val == c)
14086+ return 1;
14087+ c = val;
14088+ } while (c);
14089+
14090+ return 0;
14091+}
14092+
14093+/**
14094 * atomic_inc_short - increment of a short integer
14095 * @v: pointer to type int
14096 *
14097@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
14098 #endif
14099
14100 /* These are x86-specific, used by some header files */
14101-#define atomic_clear_mask(mask, addr) \
14102- asm volatile(LOCK_PREFIX "andl %0,%1" \
14103- : : "r" (~(mask)), "m" (*(addr)) : "memory")
14104+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
14105+{
14106+ asm volatile(LOCK_PREFIX "andl %1,%0"
14107+ : "+m" (v->counter)
14108+ : "r" (~(mask))
14109+ : "memory");
14110+}
14111
14112-#define atomic_set_mask(mask, addr) \
14113- asm volatile(LOCK_PREFIX "orl %0,%1" \
14114- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
14115- : "memory")
14116+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14117+{
14118+ asm volatile(LOCK_PREFIX "andl %1,%0"
14119+ : "+m" (v->counter)
14120+ : "r" (~(mask))
14121+ : "memory");
14122+}
14123+
14124+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
14125+{
14126+ asm volatile(LOCK_PREFIX "orl %1,%0"
14127+ : "+m" (v->counter)
14128+ : "r" (mask)
14129+ : "memory");
14130+}
14131+
14132+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14133+{
14134+ asm volatile(LOCK_PREFIX "orl %1,%0"
14135+ : "+m" (v->counter)
14136+ : "r" (mask)
14137+ : "memory");
14138+}
14139
14140 /* Atomic operations are already serializing on x86 */
14141 #define smp_mb__before_atomic_dec() barrier()
14142diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
14143index b154de7..aadebd8 100644
14144--- a/arch/x86/include/asm/atomic64_32.h
14145+++ b/arch/x86/include/asm/atomic64_32.h
14146@@ -12,6 +12,14 @@ typedef struct {
14147 u64 __aligned(8) counter;
14148 } atomic64_t;
14149
14150+#ifdef CONFIG_PAX_REFCOUNT
14151+typedef struct {
14152+ u64 __aligned(8) counter;
14153+} atomic64_unchecked_t;
14154+#else
14155+typedef atomic64_t atomic64_unchecked_t;
14156+#endif
14157+
14158 #define ATOMIC64_INIT(val) { (val) }
14159
14160 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
14161@@ -37,21 +45,31 @@ typedef struct {
14162 ATOMIC64_DECL_ONE(sym##_386)
14163
14164 ATOMIC64_DECL_ONE(add_386);
14165+ATOMIC64_DECL_ONE(add_unchecked_386);
14166 ATOMIC64_DECL_ONE(sub_386);
14167+ATOMIC64_DECL_ONE(sub_unchecked_386);
14168 ATOMIC64_DECL_ONE(inc_386);
14169+ATOMIC64_DECL_ONE(inc_unchecked_386);
14170 ATOMIC64_DECL_ONE(dec_386);
14171+ATOMIC64_DECL_ONE(dec_unchecked_386);
14172 #endif
14173
14174 #define alternative_atomic64(f, out, in...) \
14175 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
14176
14177 ATOMIC64_DECL(read);
14178+ATOMIC64_DECL(read_unchecked);
14179 ATOMIC64_DECL(set);
14180+ATOMIC64_DECL(set_unchecked);
14181 ATOMIC64_DECL(xchg);
14182 ATOMIC64_DECL(add_return);
14183+ATOMIC64_DECL(add_return_unchecked);
14184 ATOMIC64_DECL(sub_return);
14185+ATOMIC64_DECL(sub_return_unchecked);
14186 ATOMIC64_DECL(inc_return);
14187+ATOMIC64_DECL(inc_return_unchecked);
14188 ATOMIC64_DECL(dec_return);
14189+ATOMIC64_DECL(dec_return_unchecked);
14190 ATOMIC64_DECL(dec_if_positive);
14191 ATOMIC64_DECL(inc_not_zero);
14192 ATOMIC64_DECL(add_unless);
14193@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
14194 }
14195
14196 /**
14197+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
14198+ * @p: pointer to type atomic64_unchecked_t
14199+ * @o: expected value
14200+ * @n: new value
14201+ *
14202+ * Atomically sets @v to @n if it was equal to @o and returns
14203+ * the old value.
14204+ */
14205+
14206+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
14207+{
14208+ return cmpxchg64(&v->counter, o, n);
14209+}
14210+
14211+/**
14212 * atomic64_xchg - xchg atomic64 variable
14213 * @v: pointer to type atomic64_t
14214 * @n: value to assign
14215@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
14216 }
14217
14218 /**
14219+ * atomic64_set_unchecked - set atomic64 variable
14220+ * @v: pointer to type atomic64_unchecked_t
14221+ * @n: value to assign
14222+ *
14223+ * Atomically sets the value of @v to @n.
14224+ */
14225+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
14226+{
14227+ unsigned high = (unsigned)(i >> 32);
14228+ unsigned low = (unsigned)i;
14229+ alternative_atomic64(set, /* no output */,
14230+ "S" (v), "b" (low), "c" (high)
14231+ : "eax", "edx", "memory");
14232+}
14233+
14234+/**
14235 * atomic64_read - read atomic64 variable
14236 * @v: pointer to type atomic64_t
14237 *
14238@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
14239 }
14240
14241 /**
14242+ * atomic64_read_unchecked - read atomic64 variable
14243+ * @v: pointer to type atomic64_unchecked_t
14244+ *
14245+ * Atomically reads the value of @v and returns it.
14246+ */
14247+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
14248+{
14249+ long long r;
14250+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
14251+ return r;
14252+ }
14253+
14254+/**
14255 * atomic64_add_return - add and return
14256 * @i: integer value to add
14257 * @v: pointer to type atomic64_t
14258@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
14259 return i;
14260 }
14261
14262+/**
14263+ * atomic64_add_return_unchecked - add and return
14264+ * @i: integer value to add
14265+ * @v: pointer to type atomic64_unchecked_t
14266+ *
14267+ * Atomically adds @i to @v and returns @i + *@v
14268+ */
14269+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
14270+{
14271+ alternative_atomic64(add_return_unchecked,
14272+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14273+ ASM_NO_INPUT_CLOBBER("memory"));
14274+ return i;
14275+}
14276+
14277 /*
14278 * Other variants with different arithmetic operators:
14279 */
14280@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
14281 return a;
14282 }
14283
14284+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14285+{
14286+ long long a;
14287+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
14288+ "S" (v) : "memory", "ecx");
14289+ return a;
14290+}
14291+
14292 static inline long long atomic64_dec_return(atomic64_t *v)
14293 {
14294 long long a;
14295@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
14296 }
14297
14298 /**
14299+ * atomic64_add_unchecked - add integer to atomic64 variable
14300+ * @i: integer value to add
14301+ * @v: pointer to type atomic64_unchecked_t
14302+ *
14303+ * Atomically adds @i to @v.
14304+ */
14305+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
14306+{
14307+ __alternative_atomic64(add_unchecked, add_return_unchecked,
14308+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14309+ ASM_NO_INPUT_CLOBBER("memory"));
14310+ return i;
14311+}
14312+
14313+/**
14314 * atomic64_sub - subtract the atomic64 variable
14315 * @i: integer value to subtract
14316 * @v: pointer to type atomic64_t
14317diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
14318index 0e1cbfc..5623683 100644
14319--- a/arch/x86/include/asm/atomic64_64.h
14320+++ b/arch/x86/include/asm/atomic64_64.h
14321@@ -18,7 +18,19 @@
14322 */
14323 static inline long atomic64_read(const atomic64_t *v)
14324 {
14325- return (*(volatile long *)&(v)->counter);
14326+ return (*(volatile const long *)&(v)->counter);
14327+}
14328+
14329+/**
14330+ * atomic64_read_unchecked - read atomic64 variable
14331+ * @v: pointer of type atomic64_unchecked_t
14332+ *
14333+ * Atomically reads the value of @v.
14334+ * Doesn't imply a read memory barrier.
14335+ */
14336+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
14337+{
14338+ return (*(volatile const long *)&(v)->counter);
14339 }
14340
14341 /**
14342@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
14343 }
14344
14345 /**
14346+ * atomic64_set_unchecked - set atomic64 variable
14347+ * @v: pointer to type atomic64_unchecked_t
14348+ * @i: required value
14349+ *
14350+ * Atomically sets the value of @v to @i.
14351+ */
14352+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
14353+{
14354+ v->counter = i;
14355+}
14356+
14357+/**
14358 * atomic64_add - add integer to atomic64 variable
14359 * @i: integer value to add
14360 * @v: pointer to type atomic64_t
14361@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
14362 */
14363 static inline void atomic64_add(long i, atomic64_t *v)
14364 {
14365+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
14366+
14367+#ifdef CONFIG_PAX_REFCOUNT
14368+ "jno 0f\n"
14369+ LOCK_PREFIX "subq %1,%0\n"
14370+ "int $4\n0:\n"
14371+ _ASM_EXTABLE(0b, 0b)
14372+#endif
14373+
14374+ : "=m" (v->counter)
14375+ : "er" (i), "m" (v->counter));
14376+}
14377+
14378+/**
14379+ * atomic64_add_unchecked - add integer to atomic64 variable
14380+ * @i: integer value to add
14381+ * @v: pointer to type atomic64_unchecked_t
14382+ *
14383+ * Atomically adds @i to @v.
14384+ */
14385+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
14386+{
14387 asm volatile(LOCK_PREFIX "addq %1,%0"
14388 : "=m" (v->counter)
14389 : "er" (i), "m" (v->counter));
14390@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
14391 */
14392 static inline void atomic64_sub(long i, atomic64_t *v)
14393 {
14394- asm volatile(LOCK_PREFIX "subq %1,%0"
14395+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14396+
14397+#ifdef CONFIG_PAX_REFCOUNT
14398+ "jno 0f\n"
14399+ LOCK_PREFIX "addq %1,%0\n"
14400+ "int $4\n0:\n"
14401+ _ASM_EXTABLE(0b, 0b)
14402+#endif
14403+
14404+ : "=m" (v->counter)
14405+ : "er" (i), "m" (v->counter));
14406+}
14407+
14408+/**
14409+ * atomic64_sub_unchecked - subtract the atomic64 variable
14410+ * @i: integer value to subtract
14411+ * @v: pointer to type atomic64_unchecked_t
14412+ *
14413+ * Atomically subtracts @i from @v.
14414+ */
14415+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
14416+{
14417+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14418 : "=m" (v->counter)
14419 : "er" (i), "m" (v->counter));
14420 }
14421@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14422 {
14423 unsigned char c;
14424
14425- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
14426+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
14427+
14428+#ifdef CONFIG_PAX_REFCOUNT
14429+ "jno 0f\n"
14430+ LOCK_PREFIX "addq %2,%0\n"
14431+ "int $4\n0:\n"
14432+ _ASM_EXTABLE(0b, 0b)
14433+#endif
14434+
14435+ "sete %1\n"
14436 : "=m" (v->counter), "=qm" (c)
14437 : "er" (i), "m" (v->counter) : "memory");
14438 return c;
14439@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14440 */
14441 static inline void atomic64_inc(atomic64_t *v)
14442 {
14443+ asm volatile(LOCK_PREFIX "incq %0\n"
14444+
14445+#ifdef CONFIG_PAX_REFCOUNT
14446+ "jno 0f\n"
14447+ LOCK_PREFIX "decq %0\n"
14448+ "int $4\n0:\n"
14449+ _ASM_EXTABLE(0b, 0b)
14450+#endif
14451+
14452+ : "=m" (v->counter)
14453+ : "m" (v->counter));
14454+}
14455+
14456+/**
14457+ * atomic64_inc_unchecked - increment atomic64 variable
14458+ * @v: pointer to type atomic64_unchecked_t
14459+ *
14460+ * Atomically increments @v by 1.
14461+ */
14462+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
14463+{
14464 asm volatile(LOCK_PREFIX "incq %0"
14465 : "=m" (v->counter)
14466 : "m" (v->counter));
14467@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
14468 */
14469 static inline void atomic64_dec(atomic64_t *v)
14470 {
14471- asm volatile(LOCK_PREFIX "decq %0"
14472+ asm volatile(LOCK_PREFIX "decq %0\n"
14473+
14474+#ifdef CONFIG_PAX_REFCOUNT
14475+ "jno 0f\n"
14476+ LOCK_PREFIX "incq %0\n"
14477+ "int $4\n0:\n"
14478+ _ASM_EXTABLE(0b, 0b)
14479+#endif
14480+
14481+ : "=m" (v->counter)
14482+ : "m" (v->counter));
14483+}
14484+
14485+/**
14486+ * atomic64_dec_unchecked - decrement atomic64 variable
14487+ * @v: pointer to type atomic64_t
14488+ *
14489+ * Atomically decrements @v by 1.
14490+ */
14491+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
14492+{
14493+ asm volatile(LOCK_PREFIX "decq %0\n"
14494 : "=m" (v->counter)
14495 : "m" (v->counter));
14496 }
14497@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
14498 {
14499 unsigned char c;
14500
14501- asm volatile(LOCK_PREFIX "decq %0; sete %1"
14502+ asm volatile(LOCK_PREFIX "decq %0\n"
14503+
14504+#ifdef CONFIG_PAX_REFCOUNT
14505+ "jno 0f\n"
14506+ LOCK_PREFIX "incq %0\n"
14507+ "int $4\n0:\n"
14508+ _ASM_EXTABLE(0b, 0b)
14509+#endif
14510+
14511+ "sete %1\n"
14512 : "=m" (v->counter), "=qm" (c)
14513 : "m" (v->counter) : "memory");
14514 return c != 0;
14515@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
14516 {
14517 unsigned char c;
14518
14519- asm volatile(LOCK_PREFIX "incq %0; sete %1"
14520+ asm volatile(LOCK_PREFIX "incq %0\n"
14521+
14522+#ifdef CONFIG_PAX_REFCOUNT
14523+ "jno 0f\n"
14524+ LOCK_PREFIX "decq %0\n"
14525+ "int $4\n0:\n"
14526+ _ASM_EXTABLE(0b, 0b)
14527+#endif
14528+
14529+ "sete %1\n"
14530 : "=m" (v->counter), "=qm" (c)
14531 : "m" (v->counter) : "memory");
14532 return c != 0;
14533@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
14534 {
14535 unsigned char c;
14536
14537- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
14538+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
14539+
14540+#ifdef CONFIG_PAX_REFCOUNT
14541+ "jno 0f\n"
14542+ LOCK_PREFIX "subq %2,%0\n"
14543+ "int $4\n0:\n"
14544+ _ASM_EXTABLE(0b, 0b)
14545+#endif
14546+
14547+ "sets %1\n"
14548 : "=m" (v->counter), "=qm" (c)
14549 : "er" (i), "m" (v->counter) : "memory");
14550 return c;
14551@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
14552 */
14553 static inline long atomic64_add_return(long i, atomic64_t *v)
14554 {
14555+ return i + xadd_check_overflow(&v->counter, i);
14556+}
14557+
14558+/**
14559+ * atomic64_add_return_unchecked - add and return
14560+ * @i: integer value to add
14561+ * @v: pointer to type atomic64_unchecked_t
14562+ *
14563+ * Atomically adds @i to @v and returns @i + @v
14564+ */
14565+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
14566+{
14567 return i + xadd(&v->counter, i);
14568 }
14569
14570@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
14571 }
14572
14573 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
14574+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14575+{
14576+ return atomic64_add_return_unchecked(1, v);
14577+}
14578 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
14579
14580 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
14581@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
14582 return cmpxchg(&v->counter, old, new);
14583 }
14584
14585+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
14586+{
14587+ return cmpxchg(&v->counter, old, new);
14588+}
14589+
14590 static inline long atomic64_xchg(atomic64_t *v, long new)
14591 {
14592 return xchg(&v->counter, new);
14593@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
14594 */
14595 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
14596 {
14597- long c, old;
14598+ long c, old, new;
14599 c = atomic64_read(v);
14600 for (;;) {
14601- if (unlikely(c == (u)))
14602+ if (unlikely(c == u))
14603 break;
14604- old = atomic64_cmpxchg((v), c, c + (a));
14605+
14606+ asm volatile("add %2,%0\n"
14607+
14608+#ifdef CONFIG_PAX_REFCOUNT
14609+ "jno 0f\n"
14610+ "sub %2,%0\n"
14611+ "int $4\n0:\n"
14612+ _ASM_EXTABLE(0b, 0b)
14613+#endif
14614+
14615+ : "=r" (new)
14616+ : "0" (c), "ir" (a));
14617+
14618+ old = atomic64_cmpxchg(v, c, new);
14619 if (likely(old == c))
14620 break;
14621 c = old;
14622 }
14623- return c != (u);
14624+ return c != u;
14625 }
14626
14627 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
14628diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
14629index 6dfd019..28e188d 100644
14630--- a/arch/x86/include/asm/bitops.h
14631+++ b/arch/x86/include/asm/bitops.h
14632@@ -40,7 +40,7 @@
14633 * a mask operation on a byte.
14634 */
14635 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
14636-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
14637+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
14638 #define CONST_MASK(nr) (1 << ((nr) & 7))
14639
14640 /**
14641@@ -486,7 +486,7 @@ static inline int fls(int x)
14642 * at position 64.
14643 */
14644 #ifdef CONFIG_X86_64
14645-static __always_inline int fls64(__u64 x)
14646+static __always_inline long fls64(__u64 x)
14647 {
14648 int bitpos = -1;
14649 /*
14650diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
14651index 4fa687a..60f2d39 100644
14652--- a/arch/x86/include/asm/boot.h
14653+++ b/arch/x86/include/asm/boot.h
14654@@ -6,10 +6,15 @@
14655 #include <uapi/asm/boot.h>
14656
14657 /* Physical address where kernel should be loaded. */
14658-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
14659+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
14660 + (CONFIG_PHYSICAL_ALIGN - 1)) \
14661 & ~(CONFIG_PHYSICAL_ALIGN - 1))
14662
14663+#ifndef __ASSEMBLY__
14664+extern unsigned char __LOAD_PHYSICAL_ADDR[];
14665+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
14666+#endif
14667+
14668 /* Minimum kernel alignment, as a power of two */
14669 #ifdef CONFIG_X86_64
14670 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
14671diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
14672index 48f99f1..d78ebf9 100644
14673--- a/arch/x86/include/asm/cache.h
14674+++ b/arch/x86/include/asm/cache.h
14675@@ -5,12 +5,13 @@
14676
14677 /* L1 cache line size */
14678 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
14679-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
14680+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
14681
14682 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
14683+#define __read_only __attribute__((__section__(".data..read_only")))
14684
14685 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
14686-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
14687+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
14688
14689 #ifdef CONFIG_X86_VSMP
14690 #ifdef CONFIG_SMP
14691diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
14692index 9863ee3..4a1f8e1 100644
14693--- a/arch/x86/include/asm/cacheflush.h
14694+++ b/arch/x86/include/asm/cacheflush.h
14695@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
14696 unsigned long pg_flags = pg->flags & _PGMT_MASK;
14697
14698 if (pg_flags == _PGMT_DEFAULT)
14699- return -1;
14700+ return ~0UL;
14701 else if (pg_flags == _PGMT_WC)
14702 return _PAGE_CACHE_WC;
14703 else if (pg_flags == _PGMT_UC_MINUS)
14704diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
14705index 46fc474..b02b0f9 100644
14706--- a/arch/x86/include/asm/checksum_32.h
14707+++ b/arch/x86/include/asm/checksum_32.h
14708@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
14709 int len, __wsum sum,
14710 int *src_err_ptr, int *dst_err_ptr);
14711
14712+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
14713+ int len, __wsum sum,
14714+ int *src_err_ptr, int *dst_err_ptr);
14715+
14716+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
14717+ int len, __wsum sum,
14718+ int *src_err_ptr, int *dst_err_ptr);
14719+
14720 /*
14721 * Note: when you get a NULL pointer exception here this means someone
14722 * passed in an incorrect kernel address to one of these functions.
14723@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
14724 int *err_ptr)
14725 {
14726 might_sleep();
14727- return csum_partial_copy_generic((__force void *)src, dst,
14728+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
14729 len, sum, err_ptr, NULL);
14730 }
14731
14732@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
14733 {
14734 might_sleep();
14735 if (access_ok(VERIFY_WRITE, dst, len))
14736- return csum_partial_copy_generic(src, (__force void *)dst,
14737+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
14738 len, sum, NULL, err_ptr);
14739
14740 if (len)
14741diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
14742index d47786a..ce1b05d 100644
14743--- a/arch/x86/include/asm/cmpxchg.h
14744+++ b/arch/x86/include/asm/cmpxchg.h
14745@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
14746 __compiletime_error("Bad argument size for cmpxchg");
14747 extern void __xadd_wrong_size(void)
14748 __compiletime_error("Bad argument size for xadd");
14749+extern void __xadd_check_overflow_wrong_size(void)
14750+ __compiletime_error("Bad argument size for xadd_check_overflow");
14751 extern void __add_wrong_size(void)
14752 __compiletime_error("Bad argument size for add");
14753+extern void __add_check_overflow_wrong_size(void)
14754+ __compiletime_error("Bad argument size for add_check_overflow");
14755
14756 /*
14757 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
14758@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
14759 __ret; \
14760 })
14761
14762+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
14763+ ({ \
14764+ __typeof__ (*(ptr)) __ret = (arg); \
14765+ switch (sizeof(*(ptr))) { \
14766+ case __X86_CASE_L: \
14767+ asm volatile (lock #op "l %0, %1\n" \
14768+ "jno 0f\n" \
14769+ "mov %0,%1\n" \
14770+ "int $4\n0:\n" \
14771+ _ASM_EXTABLE(0b, 0b) \
14772+ : "+r" (__ret), "+m" (*(ptr)) \
14773+ : : "memory", "cc"); \
14774+ break; \
14775+ case __X86_CASE_Q: \
14776+ asm volatile (lock #op "q %q0, %1\n" \
14777+ "jno 0f\n" \
14778+ "mov %0,%1\n" \
14779+ "int $4\n0:\n" \
14780+ _ASM_EXTABLE(0b, 0b) \
14781+ : "+r" (__ret), "+m" (*(ptr)) \
14782+ : : "memory", "cc"); \
14783+ break; \
14784+ default: \
14785+ __ ## op ## _check_overflow_wrong_size(); \
14786+ } \
14787+ __ret; \
14788+ })
14789+
14790 /*
14791 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
14792 * Since this is generally used to protect other memory information, we
14793@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
14794 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
14795 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
14796
14797+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
14798+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
14799+
14800 #define __add(ptr, inc, lock) \
14801 ({ \
14802 __typeof__ (*(ptr)) __ret = (inc); \
14803diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
14804index 59c6c40..5e0b22c 100644
14805--- a/arch/x86/include/asm/compat.h
14806+++ b/arch/x86/include/asm/compat.h
14807@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
14808 typedef u32 compat_uint_t;
14809 typedef u32 compat_ulong_t;
14810 typedef u64 __attribute__((aligned(4))) compat_u64;
14811-typedef u32 compat_uptr_t;
14812+typedef u32 __user compat_uptr_t;
14813
14814 struct compat_timespec {
14815 compat_time_t tv_sec;
14816diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
14817index e99ac27..10d834e 100644
14818--- a/arch/x86/include/asm/cpufeature.h
14819+++ b/arch/x86/include/asm/cpufeature.h
14820@@ -203,7 +203,7 @@
14821 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
14822 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
14823 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
14824-
14825+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
14826
14827 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
14828 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
14829@@ -211,7 +211,7 @@
14830 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
14831 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
14832 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
14833-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
14834+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
14835 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
14836 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
14837 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
14838@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
14839 #undef cpu_has_centaur_mcr
14840 #define cpu_has_centaur_mcr 0
14841
14842+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
14843 #endif /* CONFIG_X86_64 */
14844
14845 #if __GNUC__ >= 4
14846@@ -394,7 +395,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
14847 ".section .discard,\"aw\",@progbits\n"
14848 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
14849 ".previous\n"
14850- ".section .altinstr_replacement,\"ax\"\n"
14851+ ".section .altinstr_replacement,\"a\"\n"
14852 "3: movb $1,%0\n"
14853 "4:\n"
14854 ".previous\n"
14855diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
14856index 8bf1c06..b6ae785 100644
14857--- a/arch/x86/include/asm/desc.h
14858+++ b/arch/x86/include/asm/desc.h
14859@@ -4,6 +4,7 @@
14860 #include <asm/desc_defs.h>
14861 #include <asm/ldt.h>
14862 #include <asm/mmu.h>
14863+#include <asm/pgtable.h>
14864
14865 #include <linux/smp.h>
14866 #include <linux/percpu.h>
14867@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14868
14869 desc->type = (info->read_exec_only ^ 1) << 1;
14870 desc->type |= info->contents << 2;
14871+ desc->type |= info->seg_not_present ^ 1;
14872
14873 desc->s = 1;
14874 desc->dpl = 0x3;
14875@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14876 }
14877
14878 extern struct desc_ptr idt_descr;
14879-extern gate_desc idt_table[];
14880 extern struct desc_ptr nmi_idt_descr;
14881-extern gate_desc nmi_idt_table[];
14882-
14883-struct gdt_page {
14884- struct desc_struct gdt[GDT_ENTRIES];
14885-} __attribute__((aligned(PAGE_SIZE)));
14886-
14887-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
14888+extern gate_desc idt_table[256];
14889+extern gate_desc nmi_idt_table[256];
14890
14891+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
14892 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
14893 {
14894- return per_cpu(gdt_page, cpu).gdt;
14895+ return cpu_gdt_table[cpu];
14896 }
14897
14898 #ifdef CONFIG_X86_64
14899@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
14900 unsigned long base, unsigned dpl, unsigned flags,
14901 unsigned short seg)
14902 {
14903- gate->a = (seg << 16) | (base & 0xffff);
14904- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
14905+ gate->gate.offset_low = base;
14906+ gate->gate.seg = seg;
14907+ gate->gate.reserved = 0;
14908+ gate->gate.type = type;
14909+ gate->gate.s = 0;
14910+ gate->gate.dpl = dpl;
14911+ gate->gate.p = 1;
14912+ gate->gate.offset_high = base >> 16;
14913 }
14914
14915 #endif
14916@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
14917
14918 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
14919 {
14920+ pax_open_kernel();
14921 memcpy(&idt[entry], gate, sizeof(*gate));
14922+ pax_close_kernel();
14923 }
14924
14925 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
14926 {
14927+ pax_open_kernel();
14928 memcpy(&ldt[entry], desc, 8);
14929+ pax_close_kernel();
14930 }
14931
14932 static inline void
14933@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
14934 default: size = sizeof(*gdt); break;
14935 }
14936
14937+ pax_open_kernel();
14938 memcpy(&gdt[entry], desc, size);
14939+ pax_close_kernel();
14940 }
14941
14942 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
14943@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
14944
14945 static inline void native_load_tr_desc(void)
14946 {
14947+ pax_open_kernel();
14948 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
14949+ pax_close_kernel();
14950 }
14951
14952 static inline void native_load_gdt(const struct desc_ptr *dtr)
14953@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
14954 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
14955 unsigned int i;
14956
14957+ pax_open_kernel();
14958 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
14959 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
14960+ pax_close_kernel();
14961 }
14962
14963 #define _LDT_empty(info) \
14964@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
14965 preempt_enable();
14966 }
14967
14968-static inline unsigned long get_desc_base(const struct desc_struct *desc)
14969+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
14970 {
14971 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
14972 }
14973@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
14974 }
14975
14976 #ifdef CONFIG_X86_64
14977-static inline void set_nmi_gate(int gate, void *addr)
14978+static inline void set_nmi_gate(int gate, const void *addr)
14979 {
14980 gate_desc s;
14981
14982@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
14983 }
14984 #endif
14985
14986-static inline void _set_gate(int gate, unsigned type, void *addr,
14987+static inline void _set_gate(int gate, unsigned type, const void *addr,
14988 unsigned dpl, unsigned ist, unsigned seg)
14989 {
14990 gate_desc s;
14991@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
14992 * Pentium F0 0F bugfix can have resulted in the mapped
14993 * IDT being write-protected.
14994 */
14995-static inline void set_intr_gate(unsigned int n, void *addr)
14996+static inline void set_intr_gate(unsigned int n, const void *addr)
14997 {
14998 BUG_ON((unsigned)n > 0xFF);
14999 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
15000@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
15001 /*
15002 * This routine sets up an interrupt gate at directory privilege level 3.
15003 */
15004-static inline void set_system_intr_gate(unsigned int n, void *addr)
15005+static inline void set_system_intr_gate(unsigned int n, const void *addr)
15006 {
15007 BUG_ON((unsigned)n > 0xFF);
15008 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
15009 }
15010
15011-static inline void set_system_trap_gate(unsigned int n, void *addr)
15012+static inline void set_system_trap_gate(unsigned int n, const void *addr)
15013 {
15014 BUG_ON((unsigned)n > 0xFF);
15015 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
15016 }
15017
15018-static inline void set_trap_gate(unsigned int n, void *addr)
15019+static inline void set_trap_gate(unsigned int n, const void *addr)
15020 {
15021 BUG_ON((unsigned)n > 0xFF);
15022 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
15023@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
15024 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
15025 {
15026 BUG_ON((unsigned)n > 0xFF);
15027- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
15028+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
15029 }
15030
15031-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
15032+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
15033 {
15034 BUG_ON((unsigned)n > 0xFF);
15035 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
15036 }
15037
15038-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
15039+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
15040 {
15041 BUG_ON((unsigned)n > 0xFF);
15042 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
15043 }
15044
15045+#ifdef CONFIG_X86_32
15046+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
15047+{
15048+ struct desc_struct d;
15049+
15050+ if (likely(limit))
15051+ limit = (limit - 1UL) >> PAGE_SHIFT;
15052+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
15053+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
15054+}
15055+#endif
15056+
15057 #endif /* _ASM_X86_DESC_H */
15058diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
15059index 278441f..b95a174 100644
15060--- a/arch/x86/include/asm/desc_defs.h
15061+++ b/arch/x86/include/asm/desc_defs.h
15062@@ -31,6 +31,12 @@ struct desc_struct {
15063 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
15064 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
15065 };
15066+ struct {
15067+ u16 offset_low;
15068+ u16 seg;
15069+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
15070+ unsigned offset_high: 16;
15071+ } gate;
15072 };
15073 } __attribute__((packed));
15074
15075diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
15076index ced283a..ffe04cc 100644
15077--- a/arch/x86/include/asm/div64.h
15078+++ b/arch/x86/include/asm/div64.h
15079@@ -39,7 +39,7 @@
15080 __mod; \
15081 })
15082
15083-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15084+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15085 {
15086 union {
15087 u64 v64;
15088diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
15089index 9c999c1..3860cb8 100644
15090--- a/arch/x86/include/asm/elf.h
15091+++ b/arch/x86/include/asm/elf.h
15092@@ -243,7 +243,25 @@ extern int force_personality32;
15093 the loader. We need to make sure that it is out of the way of the program
15094 that it will "exec", and that there is sufficient room for the brk. */
15095
15096+#ifdef CONFIG_PAX_SEGMEXEC
15097+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
15098+#else
15099 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
15100+#endif
15101+
15102+#ifdef CONFIG_PAX_ASLR
15103+#ifdef CONFIG_X86_32
15104+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
15105+
15106+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15107+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15108+#else
15109+#define PAX_ELF_ET_DYN_BASE 0x400000UL
15110+
15111+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15112+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15113+#endif
15114+#endif
15115
15116 /* This yields a mask that user programs can use to figure out what
15117 instruction set this CPU supports. This could be done in user space,
15118@@ -296,16 +314,12 @@ do { \
15119
15120 #define ARCH_DLINFO \
15121 do { \
15122- if (vdso_enabled) \
15123- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15124- (unsigned long)current->mm->context.vdso); \
15125+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15126 } while (0)
15127
15128 #define ARCH_DLINFO_X32 \
15129 do { \
15130- if (vdso_enabled) \
15131- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15132- (unsigned long)current->mm->context.vdso); \
15133+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15134 } while (0)
15135
15136 #define AT_SYSINFO 32
15137@@ -320,7 +334,7 @@ else \
15138
15139 #endif /* !CONFIG_X86_32 */
15140
15141-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
15142+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
15143
15144 #define VDSO_ENTRY \
15145 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
15146@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
15147 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
15148 #define compat_arch_setup_additional_pages syscall32_setup_pages
15149
15150-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
15151-#define arch_randomize_brk arch_randomize_brk
15152-
15153 /*
15154 * True on X86_32 or when emulating IA32 on X86_64
15155 */
15156diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
15157index 75ce3f4..882e801 100644
15158--- a/arch/x86/include/asm/emergency-restart.h
15159+++ b/arch/x86/include/asm/emergency-restart.h
15160@@ -13,6 +13,6 @@ enum reboot_type {
15161
15162 extern enum reboot_type reboot_type;
15163
15164-extern void machine_emergency_restart(void);
15165+extern void machine_emergency_restart(void) __noreturn;
15166
15167 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
15168diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
15169index e25cc33..7d3ec01 100644
15170--- a/arch/x86/include/asm/fpu-internal.h
15171+++ b/arch/x86/include/asm/fpu-internal.h
15172@@ -126,8 +126,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
15173 #define user_insn(insn, output, input...) \
15174 ({ \
15175 int err; \
15176+ pax_open_userland(); \
15177 asm volatile(ASM_STAC "\n" \
15178- "1:" #insn "\n\t" \
15179+ "1:" \
15180+ __copyuser_seg \
15181+ #insn "\n\t" \
15182 "2: " ASM_CLAC "\n" \
15183 ".section .fixup,\"ax\"\n" \
15184 "3: movl $-1,%[err]\n" \
15185@@ -136,6 +139,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
15186 _ASM_EXTABLE(1b, 3b) \
15187 : [err] "=r" (err), output \
15188 : "0"(0), input); \
15189+ pax_close_userland(); \
15190 err; \
15191 })
15192
15193@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
15194 "emms\n\t" /* clear stack tags */
15195 "fildl %P[addr]", /* set F?P to defined value */
15196 X86_FEATURE_FXSAVE_LEAK,
15197- [addr] "m" (tsk->thread.fpu.has_fpu));
15198+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
15199
15200 return fpu_restore_checking(&tsk->thread.fpu);
15201 }
15202diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
15203index be27ba1..04a8801 100644
15204--- a/arch/x86/include/asm/futex.h
15205+++ b/arch/x86/include/asm/futex.h
15206@@ -12,6 +12,7 @@
15207 #include <asm/smap.h>
15208
15209 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
15210+ typecheck(u32 __user *, uaddr); \
15211 asm volatile("\t" ASM_STAC "\n" \
15212 "1:\t" insn "\n" \
15213 "2:\t" ASM_CLAC "\n" \
15214@@ -20,15 +21,16 @@
15215 "\tjmp\t2b\n" \
15216 "\t.previous\n" \
15217 _ASM_EXTABLE(1b, 3b) \
15218- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
15219+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
15220 : "i" (-EFAULT), "0" (oparg), "1" (0))
15221
15222 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
15223+ typecheck(u32 __user *, uaddr); \
15224 asm volatile("\t" ASM_STAC "\n" \
15225 "1:\tmovl %2, %0\n" \
15226 "\tmovl\t%0, %3\n" \
15227 "\t" insn "\n" \
15228- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
15229+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
15230 "\tjnz\t1b\n" \
15231 "3:\t" ASM_CLAC "\n" \
15232 "\t.section .fixup,\"ax\"\n" \
15233@@ -38,7 +40,7 @@
15234 _ASM_EXTABLE(1b, 4b) \
15235 _ASM_EXTABLE(2b, 4b) \
15236 : "=&a" (oldval), "=&r" (ret), \
15237- "+m" (*uaddr), "=&r" (tem) \
15238+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
15239 : "r" (oparg), "i" (-EFAULT), "1" (0))
15240
15241 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15242@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15243
15244 pagefault_disable();
15245
15246+ pax_open_userland();
15247 switch (op) {
15248 case FUTEX_OP_SET:
15249- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
15250+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
15251 break;
15252 case FUTEX_OP_ADD:
15253- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
15254+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
15255 uaddr, oparg);
15256 break;
15257 case FUTEX_OP_OR:
15258@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15259 default:
15260 ret = -ENOSYS;
15261 }
15262+ pax_close_userland();
15263
15264 pagefault_enable();
15265
15266@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
15267 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
15268 return -EFAULT;
15269
15270+ pax_open_userland();
15271 asm volatile("\t" ASM_STAC "\n"
15272- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
15273+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
15274 "2:\t" ASM_CLAC "\n"
15275 "\t.section .fixup, \"ax\"\n"
15276 "3:\tmov %3, %0\n"
15277 "\tjmp 2b\n"
15278 "\t.previous\n"
15279 _ASM_EXTABLE(1b, 3b)
15280- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
15281+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
15282 : "i" (-EFAULT), "r" (newval), "1" (oldval)
15283 : "memory"
15284 );
15285+ pax_close_userland();
15286
15287 *uval = oldval;
15288 return ret;
15289diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
15290index 1da97ef..9c2ebff 100644
15291--- a/arch/x86/include/asm/hw_irq.h
15292+++ b/arch/x86/include/asm/hw_irq.h
15293@@ -148,8 +148,8 @@ extern void setup_ioapic_dest(void);
15294 extern void enable_IO_APIC(void);
15295
15296 /* Statistics */
15297-extern atomic_t irq_err_count;
15298-extern atomic_t irq_mis_count;
15299+extern atomic_unchecked_t irq_err_count;
15300+extern atomic_unchecked_t irq_mis_count;
15301
15302 /* EISA */
15303 extern void eisa_set_level_irq(unsigned int irq);
15304diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
15305index a203659..9889f1c 100644
15306--- a/arch/x86/include/asm/i8259.h
15307+++ b/arch/x86/include/asm/i8259.h
15308@@ -62,7 +62,7 @@ struct legacy_pic {
15309 void (*init)(int auto_eoi);
15310 int (*irq_pending)(unsigned int irq);
15311 void (*make_irq)(unsigned int irq);
15312-};
15313+} __do_const;
15314
15315 extern struct legacy_pic *legacy_pic;
15316 extern struct legacy_pic null_legacy_pic;
15317diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
15318index d8e8eef..1765f78 100644
15319--- a/arch/x86/include/asm/io.h
15320+++ b/arch/x86/include/asm/io.h
15321@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
15322 "m" (*(volatile type __force *)addr) barrier); }
15323
15324 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
15325-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
15326-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
15327+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
15328+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
15329
15330 build_mmio_read(__readb, "b", unsigned char, "=q", )
15331-build_mmio_read(__readw, "w", unsigned short, "=r", )
15332-build_mmio_read(__readl, "l", unsigned int, "=r", )
15333+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
15334+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
15335
15336 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
15337 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
15338@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
15339 return ioremap_nocache(offset, size);
15340 }
15341
15342-extern void iounmap(volatile void __iomem *addr);
15343+extern void iounmap(const volatile void __iomem *addr);
15344
15345 extern void set_iounmap_nonlazy(void);
15346
15347@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
15348
15349 #include <linux/vmalloc.h>
15350
15351+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
15352+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
15353+{
15354+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
15355+}
15356+
15357+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
15358+{
15359+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
15360+}
15361+
15362 /*
15363 * Convert a virtual cached pointer to an uncached pointer
15364 */
15365diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
15366index bba3cf8..06bc8da 100644
15367--- a/arch/x86/include/asm/irqflags.h
15368+++ b/arch/x86/include/asm/irqflags.h
15369@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
15370 sti; \
15371 sysexit
15372
15373+#define GET_CR0_INTO_RDI mov %cr0, %rdi
15374+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
15375+#define GET_CR3_INTO_RDI mov %cr3, %rdi
15376+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
15377+
15378 #else
15379 #define INTERRUPT_RETURN iret
15380 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
15381diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
15382index 5a6d287..f815789 100644
15383--- a/arch/x86/include/asm/kprobes.h
15384+++ b/arch/x86/include/asm/kprobes.h
15385@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
15386 #define RELATIVEJUMP_SIZE 5
15387 #define RELATIVECALL_OPCODE 0xe8
15388 #define RELATIVE_ADDR_SIZE 4
15389-#define MAX_STACK_SIZE 64
15390-#define MIN_STACK_SIZE(ADDR) \
15391- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
15392- THREAD_SIZE - (unsigned long)(ADDR))) \
15393- ? (MAX_STACK_SIZE) \
15394- : (((unsigned long)current_thread_info()) + \
15395- THREAD_SIZE - (unsigned long)(ADDR)))
15396+#define MAX_STACK_SIZE 64UL
15397+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
15398
15399 #define flush_insn_slot(p) do { } while (0)
15400
15401diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
15402index 2d89e39..baee879 100644
15403--- a/arch/x86/include/asm/local.h
15404+++ b/arch/x86/include/asm/local.h
15405@@ -10,33 +10,97 @@ typedef struct {
15406 atomic_long_t a;
15407 } local_t;
15408
15409+typedef struct {
15410+ atomic_long_unchecked_t a;
15411+} local_unchecked_t;
15412+
15413 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
15414
15415 #define local_read(l) atomic_long_read(&(l)->a)
15416+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
15417 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
15418+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
15419
15420 static inline void local_inc(local_t *l)
15421 {
15422- asm volatile(_ASM_INC "%0"
15423+ asm volatile(_ASM_INC "%0\n"
15424+
15425+#ifdef CONFIG_PAX_REFCOUNT
15426+ "jno 0f\n"
15427+ _ASM_DEC "%0\n"
15428+ "int $4\n0:\n"
15429+ _ASM_EXTABLE(0b, 0b)
15430+#endif
15431+
15432+ : "+m" (l->a.counter));
15433+}
15434+
15435+static inline void local_inc_unchecked(local_unchecked_t *l)
15436+{
15437+ asm volatile(_ASM_INC "%0\n"
15438 : "+m" (l->a.counter));
15439 }
15440
15441 static inline void local_dec(local_t *l)
15442 {
15443- asm volatile(_ASM_DEC "%0"
15444+ asm volatile(_ASM_DEC "%0\n"
15445+
15446+#ifdef CONFIG_PAX_REFCOUNT
15447+ "jno 0f\n"
15448+ _ASM_INC "%0\n"
15449+ "int $4\n0:\n"
15450+ _ASM_EXTABLE(0b, 0b)
15451+#endif
15452+
15453+ : "+m" (l->a.counter));
15454+}
15455+
15456+static inline void local_dec_unchecked(local_unchecked_t *l)
15457+{
15458+ asm volatile(_ASM_DEC "%0\n"
15459 : "+m" (l->a.counter));
15460 }
15461
15462 static inline void local_add(long i, local_t *l)
15463 {
15464- asm volatile(_ASM_ADD "%1,%0"
15465+ asm volatile(_ASM_ADD "%1,%0\n"
15466+
15467+#ifdef CONFIG_PAX_REFCOUNT
15468+ "jno 0f\n"
15469+ _ASM_SUB "%1,%0\n"
15470+ "int $4\n0:\n"
15471+ _ASM_EXTABLE(0b, 0b)
15472+#endif
15473+
15474+ : "+m" (l->a.counter)
15475+ : "ir" (i));
15476+}
15477+
15478+static inline void local_add_unchecked(long i, local_unchecked_t *l)
15479+{
15480+ asm volatile(_ASM_ADD "%1,%0\n"
15481 : "+m" (l->a.counter)
15482 : "ir" (i));
15483 }
15484
15485 static inline void local_sub(long i, local_t *l)
15486 {
15487- asm volatile(_ASM_SUB "%1,%0"
15488+ asm volatile(_ASM_SUB "%1,%0\n"
15489+
15490+#ifdef CONFIG_PAX_REFCOUNT
15491+ "jno 0f\n"
15492+ _ASM_ADD "%1,%0\n"
15493+ "int $4\n0:\n"
15494+ _ASM_EXTABLE(0b, 0b)
15495+#endif
15496+
15497+ : "+m" (l->a.counter)
15498+ : "ir" (i));
15499+}
15500+
15501+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
15502+{
15503+ asm volatile(_ASM_SUB "%1,%0\n"
15504 : "+m" (l->a.counter)
15505 : "ir" (i));
15506 }
15507@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
15508 {
15509 unsigned char c;
15510
15511- asm volatile(_ASM_SUB "%2,%0; sete %1"
15512+ asm volatile(_ASM_SUB "%2,%0\n"
15513+
15514+#ifdef CONFIG_PAX_REFCOUNT
15515+ "jno 0f\n"
15516+ _ASM_ADD "%2,%0\n"
15517+ "int $4\n0:\n"
15518+ _ASM_EXTABLE(0b, 0b)
15519+#endif
15520+
15521+ "sete %1\n"
15522 : "+m" (l->a.counter), "=qm" (c)
15523 : "ir" (i) : "memory");
15524 return c;
15525@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
15526 {
15527 unsigned char c;
15528
15529- asm volatile(_ASM_DEC "%0; sete %1"
15530+ asm volatile(_ASM_DEC "%0\n"
15531+
15532+#ifdef CONFIG_PAX_REFCOUNT
15533+ "jno 0f\n"
15534+ _ASM_INC "%0\n"
15535+ "int $4\n0:\n"
15536+ _ASM_EXTABLE(0b, 0b)
15537+#endif
15538+
15539+ "sete %1\n"
15540 : "+m" (l->a.counter), "=qm" (c)
15541 : : "memory");
15542 return c != 0;
15543@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
15544 {
15545 unsigned char c;
15546
15547- asm volatile(_ASM_INC "%0; sete %1"
15548+ asm volatile(_ASM_INC "%0\n"
15549+
15550+#ifdef CONFIG_PAX_REFCOUNT
15551+ "jno 0f\n"
15552+ _ASM_DEC "%0\n"
15553+ "int $4\n0:\n"
15554+ _ASM_EXTABLE(0b, 0b)
15555+#endif
15556+
15557+ "sete %1\n"
15558 : "+m" (l->a.counter), "=qm" (c)
15559 : : "memory");
15560 return c != 0;
15561@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
15562 {
15563 unsigned char c;
15564
15565- asm volatile(_ASM_ADD "%2,%0; sets %1"
15566+ asm volatile(_ASM_ADD "%2,%0\n"
15567+
15568+#ifdef CONFIG_PAX_REFCOUNT
15569+ "jno 0f\n"
15570+ _ASM_SUB "%2,%0\n"
15571+ "int $4\n0:\n"
15572+ _ASM_EXTABLE(0b, 0b)
15573+#endif
15574+
15575+ "sets %1\n"
15576 : "+m" (l->a.counter), "=qm" (c)
15577 : "ir" (i) : "memory");
15578 return c;
15579@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
15580 static inline long local_add_return(long i, local_t *l)
15581 {
15582 long __i = i;
15583+ asm volatile(_ASM_XADD "%0, %1\n"
15584+
15585+#ifdef CONFIG_PAX_REFCOUNT
15586+ "jno 0f\n"
15587+ _ASM_MOV "%0,%1\n"
15588+ "int $4\n0:\n"
15589+ _ASM_EXTABLE(0b, 0b)
15590+#endif
15591+
15592+ : "+r" (i), "+m" (l->a.counter)
15593+ : : "memory");
15594+ return i + __i;
15595+}
15596+
15597+/**
15598+ * local_add_return_unchecked - add and return
15599+ * @i: integer value to add
15600+ * @l: pointer to type local_unchecked_t
15601+ *
15602+ * Atomically adds @i to @l and returns @i + @l
15603+ */
15604+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
15605+{
15606+ long __i = i;
15607 asm volatile(_ASM_XADD "%0, %1;"
15608 : "+r" (i), "+m" (l->a.counter)
15609 : : "memory");
15610@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
15611
15612 #define local_cmpxchg(l, o, n) \
15613 (cmpxchg_local(&((l)->a.counter), (o), (n)))
15614+#define local_cmpxchg_unchecked(l, o, n) \
15615+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
15616 /* Always has a lock prefix */
15617 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
15618
15619diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
15620new file mode 100644
15621index 0000000..2bfd3ba
15622--- /dev/null
15623+++ b/arch/x86/include/asm/mman.h
15624@@ -0,0 +1,15 @@
15625+#ifndef _X86_MMAN_H
15626+#define _X86_MMAN_H
15627+
15628+#include <uapi/asm/mman.h>
15629+
15630+#ifdef __KERNEL__
15631+#ifndef __ASSEMBLY__
15632+#ifdef CONFIG_X86_32
15633+#define arch_mmap_check i386_mmap_check
15634+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
15635+#endif
15636+#endif
15637+#endif
15638+
15639+#endif /* X86_MMAN_H */
15640diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
15641index 5f55e69..e20bfb1 100644
15642--- a/arch/x86/include/asm/mmu.h
15643+++ b/arch/x86/include/asm/mmu.h
15644@@ -9,7 +9,7 @@
15645 * we put the segment information here.
15646 */
15647 typedef struct {
15648- void *ldt;
15649+ struct desc_struct *ldt;
15650 int size;
15651
15652 #ifdef CONFIG_X86_64
15653@@ -18,7 +18,19 @@ typedef struct {
15654 #endif
15655
15656 struct mutex lock;
15657- void *vdso;
15658+ unsigned long vdso;
15659+
15660+#ifdef CONFIG_X86_32
15661+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15662+ unsigned long user_cs_base;
15663+ unsigned long user_cs_limit;
15664+
15665+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15666+ cpumask_t cpu_user_cs_mask;
15667+#endif
15668+
15669+#endif
15670+#endif
15671 } mm_context_t;
15672
15673 #ifdef CONFIG_SMP
15674diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
15675index cdbf367..4c73c9e 100644
15676--- a/arch/x86/include/asm/mmu_context.h
15677+++ b/arch/x86/include/asm/mmu_context.h
15678@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
15679
15680 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
15681 {
15682+
15683+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15684+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
15685+ unsigned int i;
15686+ pgd_t *pgd;
15687+
15688+ pax_open_kernel();
15689+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
15690+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
15691+ set_pgd_batched(pgd+i, native_make_pgd(0));
15692+ pax_close_kernel();
15693+ }
15694+#endif
15695+
15696 #ifdef CONFIG_SMP
15697 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
15698 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
15699@@ -34,16 +48,55 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15700 struct task_struct *tsk)
15701 {
15702 unsigned cpu = smp_processor_id();
15703+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15704+ int tlbstate = TLBSTATE_OK;
15705+#endif
15706
15707 if (likely(prev != next)) {
15708 #ifdef CONFIG_SMP
15709+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15710+ tlbstate = this_cpu_read(cpu_tlbstate.state);
15711+#endif
15712 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15713 this_cpu_write(cpu_tlbstate.active_mm, next);
15714 #endif
15715 cpumask_set_cpu(cpu, mm_cpumask(next));
15716
15717 /* Re-load page tables */
15718+#ifdef CONFIG_PAX_PER_CPU_PGD
15719+ pax_open_kernel();
15720+
15721+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15722+ if (static_cpu_has(X86_FEATURE_PCID))
15723+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15724+ else
15725+#endif
15726+
15727+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15728+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15729+ pax_close_kernel();
15730+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15731+
15732+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15733+ if (static_cpu_has(X86_FEATURE_PCID)) {
15734+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15735+ unsigned long descriptor[2];
15736+ descriptor[0] = PCID_USER;
15737+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15738+ } else {
15739+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15740+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15741+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15742+ else
15743+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15744+ }
15745+ } else
15746+#endif
15747+
15748+ load_cr3(get_cpu_pgd(cpu, kernel));
15749+#else
15750 load_cr3(next->pgd);
15751+#endif
15752
15753 /* stop flush ipis for the previous mm */
15754 cpumask_clear_cpu(cpu, mm_cpumask(prev));
15755@@ -53,9 +106,63 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15756 */
15757 if (unlikely(prev->context.ldt != next->context.ldt))
15758 load_LDT_nolock(&next->context);
15759- }
15760+
15761+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15762+ if (!(__supported_pte_mask & _PAGE_NX)) {
15763+ smp_mb__before_clear_bit();
15764+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
15765+ smp_mb__after_clear_bit();
15766+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15767+ }
15768+#endif
15769+
15770+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15771+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
15772+ prev->context.user_cs_limit != next->context.user_cs_limit))
15773+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15774 #ifdef CONFIG_SMP
15775+ else if (unlikely(tlbstate != TLBSTATE_OK))
15776+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15777+#endif
15778+#endif
15779+
15780+ }
15781 else {
15782+
15783+#ifdef CONFIG_PAX_PER_CPU_PGD
15784+ pax_open_kernel();
15785+
15786+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15787+ if (static_cpu_has(X86_FEATURE_PCID))
15788+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15789+ else
15790+#endif
15791+
15792+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15793+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15794+ pax_close_kernel();
15795+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15796+
15797+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15798+ if (static_cpu_has(X86_FEATURE_PCID)) {
15799+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15800+ unsigned long descriptor[2];
15801+ descriptor[0] = PCID_USER;
15802+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15803+ } else {
15804+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15805+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15806+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15807+ else
15808+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15809+ }
15810+ } else
15811+#endif
15812+
15813+ load_cr3(get_cpu_pgd(cpu, kernel));
15814+#endif
15815+
15816+#ifdef CONFIG_SMP
15817 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15818 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
15819
15820@@ -64,11 +171,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15821 * tlb flush IPI delivery. We must reload CR3
15822 * to make sure to use no freed page tables.
15823 */
15824+
15825+#ifndef CONFIG_PAX_PER_CPU_PGD
15826 load_cr3(next->pgd);
15827+#endif
15828+
15829 load_LDT_nolock(&next->context);
15830+
15831+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15832+ if (!(__supported_pte_mask & _PAGE_NX))
15833+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15834+#endif
15835+
15836+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15837+#ifdef CONFIG_PAX_PAGEEXEC
15838+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
15839+#endif
15840+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15841+#endif
15842+
15843 }
15844+#endif
15845 }
15846-#endif
15847 }
15848
15849 #define activate_mm(prev, next) \
15850diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
15851index e3b7819..b257c64 100644
15852--- a/arch/x86/include/asm/module.h
15853+++ b/arch/x86/include/asm/module.h
15854@@ -5,6 +5,7 @@
15855
15856 #ifdef CONFIG_X86_64
15857 /* X86_64 does not define MODULE_PROC_FAMILY */
15858+#define MODULE_PROC_FAMILY ""
15859 #elif defined CONFIG_M486
15860 #define MODULE_PROC_FAMILY "486 "
15861 #elif defined CONFIG_M586
15862@@ -57,8 +58,20 @@
15863 #error unknown processor family
15864 #endif
15865
15866-#ifdef CONFIG_X86_32
15867-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
15868+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15869+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
15870+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
15871+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
15872+#else
15873+#define MODULE_PAX_KERNEXEC ""
15874 #endif
15875
15876+#ifdef CONFIG_PAX_MEMORY_UDEREF
15877+#define MODULE_PAX_UDEREF "UDEREF "
15878+#else
15879+#define MODULE_PAX_UDEREF ""
15880+#endif
15881+
15882+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
15883+
15884 #endif /* _ASM_X86_MODULE_H */
15885diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
15886index 86f9301..b365cda 100644
15887--- a/arch/x86/include/asm/nmi.h
15888+++ b/arch/x86/include/asm/nmi.h
15889@@ -40,11 +40,11 @@ struct nmiaction {
15890 nmi_handler_t handler;
15891 unsigned long flags;
15892 const char *name;
15893-};
15894+} __do_const;
15895
15896 #define register_nmi_handler(t, fn, fg, n, init...) \
15897 ({ \
15898- static struct nmiaction init fn##_na = { \
15899+ static const struct nmiaction init fn##_na = { \
15900 .handler = (fn), \
15901 .name = (n), \
15902 .flags = (fg), \
15903@@ -52,7 +52,7 @@ struct nmiaction {
15904 __register_nmi_handler((t), &fn##_na); \
15905 })
15906
15907-int __register_nmi_handler(unsigned int, struct nmiaction *);
15908+int __register_nmi_handler(unsigned int, const struct nmiaction *);
15909
15910 void unregister_nmi_handler(unsigned int, const char *);
15911
15912diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
15913index c878924..21f4889 100644
15914--- a/arch/x86/include/asm/page.h
15915+++ b/arch/x86/include/asm/page.h
15916@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
15917 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
15918
15919 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
15920+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
15921
15922 #define __boot_va(x) __va(x)
15923 #define __boot_pa(x) __pa(x)
15924diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
15925index 0f1ddee..e2fc3d1 100644
15926--- a/arch/x86/include/asm/page_64.h
15927+++ b/arch/x86/include/asm/page_64.h
15928@@ -7,9 +7,9 @@
15929
15930 /* duplicated to the one in bootmem.h */
15931 extern unsigned long max_pfn;
15932-extern unsigned long phys_base;
15933+extern const unsigned long phys_base;
15934
15935-static inline unsigned long __phys_addr_nodebug(unsigned long x)
15936+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
15937 {
15938 unsigned long y = x - __START_KERNEL_map;
15939
15940diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
15941index cfdc9ee..3f7b5d6 100644
15942--- a/arch/x86/include/asm/paravirt.h
15943+++ b/arch/x86/include/asm/paravirt.h
15944@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
15945 return (pmd_t) { ret };
15946 }
15947
15948-static inline pmdval_t pmd_val(pmd_t pmd)
15949+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
15950 {
15951 pmdval_t ret;
15952
15953@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
15954 val);
15955 }
15956
15957+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
15958+{
15959+ pgdval_t val = native_pgd_val(pgd);
15960+
15961+ if (sizeof(pgdval_t) > sizeof(long))
15962+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
15963+ val, (u64)val >> 32);
15964+ else
15965+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
15966+ val);
15967+}
15968+
15969 static inline void pgd_clear(pgd_t *pgdp)
15970 {
15971 set_pgd(pgdp, __pgd(0));
15972@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
15973 pv_mmu_ops.set_fixmap(idx, phys, flags);
15974 }
15975
15976+#ifdef CONFIG_PAX_KERNEXEC
15977+static inline unsigned long pax_open_kernel(void)
15978+{
15979+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
15980+}
15981+
15982+static inline unsigned long pax_close_kernel(void)
15983+{
15984+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
15985+}
15986+#else
15987+static inline unsigned long pax_open_kernel(void) { return 0; }
15988+static inline unsigned long pax_close_kernel(void) { return 0; }
15989+#endif
15990+
15991 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
15992
15993 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
15994@@ -926,7 +953,7 @@ extern void default_banner(void);
15995
15996 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
15997 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
15998-#define PARA_INDIRECT(addr) *%cs:addr
15999+#define PARA_INDIRECT(addr) *%ss:addr
16000 #endif
16001
16002 #define INTERRUPT_RETURN \
16003@@ -1001,6 +1028,21 @@ extern void default_banner(void);
16004 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
16005 CLBR_NONE, \
16006 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
16007+
16008+#define GET_CR0_INTO_RDI \
16009+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
16010+ mov %rax,%rdi
16011+
16012+#define SET_RDI_INTO_CR0 \
16013+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16014+
16015+#define GET_CR3_INTO_RDI \
16016+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
16017+ mov %rax,%rdi
16018+
16019+#define SET_RDI_INTO_CR3 \
16020+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
16021+
16022 #endif /* CONFIG_X86_32 */
16023
16024 #endif /* __ASSEMBLY__ */
16025diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
16026index 0db1fca..52310cc 100644
16027--- a/arch/x86/include/asm/paravirt_types.h
16028+++ b/arch/x86/include/asm/paravirt_types.h
16029@@ -84,7 +84,7 @@ struct pv_init_ops {
16030 */
16031 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
16032 unsigned long addr, unsigned len);
16033-};
16034+} __no_const;
16035
16036
16037 struct pv_lazy_ops {
16038@@ -98,7 +98,7 @@ struct pv_time_ops {
16039 unsigned long long (*sched_clock)(void);
16040 unsigned long long (*steal_clock)(int cpu);
16041 unsigned long (*get_tsc_khz)(void);
16042-};
16043+} __no_const;
16044
16045 struct pv_cpu_ops {
16046 /* hooks for various privileged instructions */
16047@@ -192,7 +192,7 @@ struct pv_cpu_ops {
16048
16049 void (*start_context_switch)(struct task_struct *prev);
16050 void (*end_context_switch)(struct task_struct *next);
16051-};
16052+} __no_const;
16053
16054 struct pv_irq_ops {
16055 /*
16056@@ -223,7 +223,7 @@ struct pv_apic_ops {
16057 unsigned long start_eip,
16058 unsigned long start_esp);
16059 #endif
16060-};
16061+} __no_const;
16062
16063 struct pv_mmu_ops {
16064 unsigned long (*read_cr2)(void);
16065@@ -313,6 +313,7 @@ struct pv_mmu_ops {
16066 struct paravirt_callee_save make_pud;
16067
16068 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
16069+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
16070 #endif /* PAGETABLE_LEVELS == 4 */
16071 #endif /* PAGETABLE_LEVELS >= 3 */
16072
16073@@ -324,6 +325,12 @@ struct pv_mmu_ops {
16074 an mfn. We can tell which is which from the index. */
16075 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
16076 phys_addr_t phys, pgprot_t flags);
16077+
16078+#ifdef CONFIG_PAX_KERNEXEC
16079+ unsigned long (*pax_open_kernel)(void);
16080+ unsigned long (*pax_close_kernel)(void);
16081+#endif
16082+
16083 };
16084
16085 struct arch_spinlock;
16086@@ -334,7 +341,7 @@ struct pv_lock_ops {
16087 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
16088 int (*spin_trylock)(struct arch_spinlock *lock);
16089 void (*spin_unlock)(struct arch_spinlock *lock);
16090-};
16091+} __no_const;
16092
16093 /* This contains all the paravirt structures: we get a convenient
16094 * number for each function using the offset which we use to indicate
16095diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
16096index b4389a4..7024269 100644
16097--- a/arch/x86/include/asm/pgalloc.h
16098+++ b/arch/x86/include/asm/pgalloc.h
16099@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
16100 pmd_t *pmd, pte_t *pte)
16101 {
16102 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16103+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
16104+}
16105+
16106+static inline void pmd_populate_user(struct mm_struct *mm,
16107+ pmd_t *pmd, pte_t *pte)
16108+{
16109+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16110 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
16111 }
16112
16113@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
16114
16115 #ifdef CONFIG_X86_PAE
16116 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
16117+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16118+{
16119+ pud_populate(mm, pudp, pmd);
16120+}
16121 #else /* !CONFIG_X86_PAE */
16122 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16123 {
16124 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16125 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
16126 }
16127+
16128+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16129+{
16130+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16131+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
16132+}
16133 #endif /* CONFIG_X86_PAE */
16134
16135 #if PAGETABLE_LEVELS > 3
16136@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16137 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
16138 }
16139
16140+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16141+{
16142+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
16143+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
16144+}
16145+
16146 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
16147 {
16148 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
16149diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
16150index f2b489c..4f7e2e5 100644
16151--- a/arch/x86/include/asm/pgtable-2level.h
16152+++ b/arch/x86/include/asm/pgtable-2level.h
16153@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
16154
16155 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16156 {
16157+ pax_open_kernel();
16158 *pmdp = pmd;
16159+ pax_close_kernel();
16160 }
16161
16162 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16163diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
16164index 4cc9f2b..5fd9226 100644
16165--- a/arch/x86/include/asm/pgtable-3level.h
16166+++ b/arch/x86/include/asm/pgtable-3level.h
16167@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16168
16169 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16170 {
16171+ pax_open_kernel();
16172 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
16173+ pax_close_kernel();
16174 }
16175
16176 static inline void native_set_pud(pud_t *pudp, pud_t pud)
16177 {
16178+ pax_open_kernel();
16179 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
16180+ pax_close_kernel();
16181 }
16182
16183 /*
16184diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
16185index 1e67223..92a9585 100644
16186--- a/arch/x86/include/asm/pgtable.h
16187+++ b/arch/x86/include/asm/pgtable.h
16188@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
16189
16190 #ifndef __PAGETABLE_PUD_FOLDED
16191 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
16192+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
16193 #define pgd_clear(pgd) native_pgd_clear(pgd)
16194 #endif
16195
16196@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
16197
16198 #define arch_end_context_switch(prev) do {} while(0)
16199
16200+#define pax_open_kernel() native_pax_open_kernel()
16201+#define pax_close_kernel() native_pax_close_kernel()
16202 #endif /* CONFIG_PARAVIRT */
16203
16204+#define __HAVE_ARCH_PAX_OPEN_KERNEL
16205+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
16206+
16207+#ifdef CONFIG_PAX_KERNEXEC
16208+static inline unsigned long native_pax_open_kernel(void)
16209+{
16210+ unsigned long cr0;
16211+
16212+ preempt_disable();
16213+ barrier();
16214+ cr0 = read_cr0() ^ X86_CR0_WP;
16215+ BUG_ON(cr0 & X86_CR0_WP);
16216+ write_cr0(cr0);
16217+ return cr0 ^ X86_CR0_WP;
16218+}
16219+
16220+static inline unsigned long native_pax_close_kernel(void)
16221+{
16222+ unsigned long cr0;
16223+
16224+ cr0 = read_cr0() ^ X86_CR0_WP;
16225+ BUG_ON(!(cr0 & X86_CR0_WP));
16226+ write_cr0(cr0);
16227+ barrier();
16228+ preempt_enable_no_resched();
16229+ return cr0 ^ X86_CR0_WP;
16230+}
16231+#else
16232+static inline unsigned long native_pax_open_kernel(void) { return 0; }
16233+static inline unsigned long native_pax_close_kernel(void) { return 0; }
16234+#endif
16235+
16236 /*
16237 * The following only work if pte_present() is true.
16238 * Undefined behaviour if not..
16239 */
16240+static inline int pte_user(pte_t pte)
16241+{
16242+ return pte_val(pte) & _PAGE_USER;
16243+}
16244+
16245 static inline int pte_dirty(pte_t pte)
16246 {
16247 return pte_flags(pte) & _PAGE_DIRTY;
16248@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
16249 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
16250 }
16251
16252+static inline unsigned long pgd_pfn(pgd_t pgd)
16253+{
16254+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
16255+}
16256+
16257 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
16258
16259 static inline int pmd_large(pmd_t pte)
16260@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
16261 return pte_clear_flags(pte, _PAGE_RW);
16262 }
16263
16264+static inline pte_t pte_mkread(pte_t pte)
16265+{
16266+ return __pte(pte_val(pte) | _PAGE_USER);
16267+}
16268+
16269 static inline pte_t pte_mkexec(pte_t pte)
16270 {
16271- return pte_clear_flags(pte, _PAGE_NX);
16272+#ifdef CONFIG_X86_PAE
16273+ if (__supported_pte_mask & _PAGE_NX)
16274+ return pte_clear_flags(pte, _PAGE_NX);
16275+ else
16276+#endif
16277+ return pte_set_flags(pte, _PAGE_USER);
16278+}
16279+
16280+static inline pte_t pte_exprotect(pte_t pte)
16281+{
16282+#ifdef CONFIG_X86_PAE
16283+ if (__supported_pte_mask & _PAGE_NX)
16284+ return pte_set_flags(pte, _PAGE_NX);
16285+ else
16286+#endif
16287+ return pte_clear_flags(pte, _PAGE_USER);
16288 }
16289
16290 static inline pte_t pte_mkdirty(pte_t pte)
16291@@ -394,6 +459,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
16292 #endif
16293
16294 #ifndef __ASSEMBLY__
16295+
16296+#ifdef CONFIG_PAX_PER_CPU_PGD
16297+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
16298+enum cpu_pgd_type {kernel = 0, user = 1};
16299+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
16300+{
16301+ return cpu_pgd[cpu][type];
16302+}
16303+#endif
16304+
16305 #include <linux/mm_types.h>
16306 #include <linux/log2.h>
16307
16308@@ -529,7 +604,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
16309 * Currently stuck as a macro due to indirect forward reference to
16310 * linux/mmzone.h's __section_mem_map_addr() definition:
16311 */
16312-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
16313+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
16314
16315 /* Find an entry in the second-level page table.. */
16316 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
16317@@ -569,7 +644,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
16318 * Currently stuck as a macro due to indirect forward reference to
16319 * linux/mmzone.h's __section_mem_map_addr() definition:
16320 */
16321-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
16322+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
16323
16324 /* to find an entry in a page-table-directory. */
16325 static inline unsigned long pud_index(unsigned long address)
16326@@ -584,7 +659,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
16327
16328 static inline int pgd_bad(pgd_t pgd)
16329 {
16330- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
16331+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
16332 }
16333
16334 static inline int pgd_none(pgd_t pgd)
16335@@ -607,7 +682,12 @@ static inline int pgd_none(pgd_t pgd)
16336 * pgd_offset() returns a (pgd_t *)
16337 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
16338 */
16339-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
16340+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
16341+
16342+#ifdef CONFIG_PAX_PER_CPU_PGD
16343+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
16344+#endif
16345+
16346 /*
16347 * a shortcut which implies the use of the kernel's pgd, instead
16348 * of a process's
16349@@ -618,6 +698,23 @@ static inline int pgd_none(pgd_t pgd)
16350 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
16351 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
16352
16353+#ifdef CONFIG_X86_32
16354+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
16355+#else
16356+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
16357+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
16358+
16359+#ifdef CONFIG_PAX_MEMORY_UDEREF
16360+#ifdef __ASSEMBLY__
16361+#define pax_user_shadow_base pax_user_shadow_base(%rip)
16362+#else
16363+extern unsigned long pax_user_shadow_base;
16364+extern pgdval_t clone_pgd_mask;
16365+#endif
16366+#endif
16367+
16368+#endif
16369+
16370 #ifndef __ASSEMBLY__
16371
16372 extern int direct_gbpages;
16373@@ -784,11 +881,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
16374 * dst and src can be on the same page, but the range must not overlap,
16375 * and must not cross a page boundary.
16376 */
16377-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
16378+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
16379 {
16380- memcpy(dst, src, count * sizeof(pgd_t));
16381+ pax_open_kernel();
16382+ while (count--)
16383+ *dst++ = *src++;
16384+ pax_close_kernel();
16385 }
16386
16387+#ifdef CONFIG_PAX_PER_CPU_PGD
16388+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
16389+#endif
16390+
16391+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16392+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
16393+#else
16394+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
16395+#endif
16396+
16397 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
16398 static inline int page_level_shift(enum pg_level level)
16399 {
16400diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
16401index 9ee3221..b979c6b 100644
16402--- a/arch/x86/include/asm/pgtable_32.h
16403+++ b/arch/x86/include/asm/pgtable_32.h
16404@@ -25,9 +25,6 @@
16405 struct mm_struct;
16406 struct vm_area_struct;
16407
16408-extern pgd_t swapper_pg_dir[1024];
16409-extern pgd_t initial_page_table[1024];
16410-
16411 static inline void pgtable_cache_init(void) { }
16412 static inline void check_pgt_cache(void) { }
16413 void paging_init(void);
16414@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
16415 # include <asm/pgtable-2level.h>
16416 #endif
16417
16418+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
16419+extern pgd_t initial_page_table[PTRS_PER_PGD];
16420+#ifdef CONFIG_X86_PAE
16421+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
16422+#endif
16423+
16424 #if defined(CONFIG_HIGHPTE)
16425 #define pte_offset_map(dir, address) \
16426 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
16427@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
16428 /* Clear a kernel PTE and flush it from the TLB */
16429 #define kpte_clear_flush(ptep, vaddr) \
16430 do { \
16431+ pax_open_kernel(); \
16432 pte_clear(&init_mm, (vaddr), (ptep)); \
16433+ pax_close_kernel(); \
16434 __flush_tlb_one((vaddr)); \
16435 } while (0)
16436
16437 #endif /* !__ASSEMBLY__ */
16438
16439+#define HAVE_ARCH_UNMAPPED_AREA
16440+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
16441+
16442 /*
16443 * kern_addr_valid() is (1) for FLATMEM and (0) for
16444 * SPARSEMEM and DISCONTIGMEM
16445diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
16446index ed5903b..c7fe163 100644
16447--- a/arch/x86/include/asm/pgtable_32_types.h
16448+++ b/arch/x86/include/asm/pgtable_32_types.h
16449@@ -8,7 +8,7 @@
16450 */
16451 #ifdef CONFIG_X86_PAE
16452 # include <asm/pgtable-3level_types.h>
16453-# define PMD_SIZE (1UL << PMD_SHIFT)
16454+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
16455 # define PMD_MASK (~(PMD_SIZE - 1))
16456 #else
16457 # include <asm/pgtable-2level_types.h>
16458@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
16459 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
16460 #endif
16461
16462+#ifdef CONFIG_PAX_KERNEXEC
16463+#ifndef __ASSEMBLY__
16464+extern unsigned char MODULES_EXEC_VADDR[];
16465+extern unsigned char MODULES_EXEC_END[];
16466+#endif
16467+#include <asm/boot.h>
16468+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
16469+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
16470+#else
16471+#define ktla_ktva(addr) (addr)
16472+#define ktva_ktla(addr) (addr)
16473+#endif
16474+
16475 #define MODULES_VADDR VMALLOC_START
16476 #define MODULES_END VMALLOC_END
16477 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
16478diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
16479index e22c1db..23a625a 100644
16480--- a/arch/x86/include/asm/pgtable_64.h
16481+++ b/arch/x86/include/asm/pgtable_64.h
16482@@ -16,10 +16,14 @@
16483
16484 extern pud_t level3_kernel_pgt[512];
16485 extern pud_t level3_ident_pgt[512];
16486+extern pud_t level3_vmalloc_start_pgt[512];
16487+extern pud_t level3_vmalloc_end_pgt[512];
16488+extern pud_t level3_vmemmap_pgt[512];
16489+extern pud_t level2_vmemmap_pgt[512];
16490 extern pmd_t level2_kernel_pgt[512];
16491 extern pmd_t level2_fixmap_pgt[512];
16492-extern pmd_t level2_ident_pgt[512];
16493-extern pgd_t init_level4_pgt[];
16494+extern pmd_t level2_ident_pgt[512*2];
16495+extern pgd_t init_level4_pgt[512];
16496
16497 #define swapper_pg_dir init_level4_pgt
16498
16499@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16500
16501 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16502 {
16503+ pax_open_kernel();
16504 *pmdp = pmd;
16505+ pax_close_kernel();
16506 }
16507
16508 static inline void native_pmd_clear(pmd_t *pmd)
16509@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
16510
16511 static inline void native_set_pud(pud_t *pudp, pud_t pud)
16512 {
16513+ pax_open_kernel();
16514 *pudp = pud;
16515+ pax_close_kernel();
16516 }
16517
16518 static inline void native_pud_clear(pud_t *pud)
16519@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
16520
16521 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
16522 {
16523+ pax_open_kernel();
16524+ *pgdp = pgd;
16525+ pax_close_kernel();
16526+}
16527+
16528+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
16529+{
16530 *pgdp = pgd;
16531 }
16532
16533diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
16534index 2d88344..4679fc3 100644
16535--- a/arch/x86/include/asm/pgtable_64_types.h
16536+++ b/arch/x86/include/asm/pgtable_64_types.h
16537@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
16538 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
16539 #define MODULES_END _AC(0xffffffffff000000, UL)
16540 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
16541+#define MODULES_EXEC_VADDR MODULES_VADDR
16542+#define MODULES_EXEC_END MODULES_END
16543+
16544+#define ktla_ktva(addr) (addr)
16545+#define ktva_ktla(addr) (addr)
16546
16547 #define EARLY_DYNAMIC_PAGE_TABLES 64
16548
16549diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
16550index e642300..0ef8f31 100644
16551--- a/arch/x86/include/asm/pgtable_types.h
16552+++ b/arch/x86/include/asm/pgtable_types.h
16553@@ -16,13 +16,12 @@
16554 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
16555 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
16556 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
16557-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
16558+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
16559 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
16560 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
16561 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
16562-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
16563-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
16564-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
16565+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
16566+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
16567 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
16568
16569 /* If _PAGE_BIT_PRESENT is clear, we use these: */
16570@@ -40,7 +39,6 @@
16571 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
16572 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
16573 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
16574-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
16575 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
16576 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
16577 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
16578@@ -57,8 +55,10 @@
16579
16580 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
16581 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
16582-#else
16583+#elif defined(CONFIG_KMEMCHECK)
16584 #define _PAGE_NX (_AT(pteval_t, 0))
16585+#else
16586+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
16587 #endif
16588
16589 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
16590@@ -116,6 +116,9 @@
16591 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
16592 _PAGE_ACCESSED)
16593
16594+#define PAGE_READONLY_NOEXEC PAGE_READONLY
16595+#define PAGE_SHARED_NOEXEC PAGE_SHARED
16596+
16597 #define __PAGE_KERNEL_EXEC \
16598 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
16599 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
16600@@ -126,7 +129,7 @@
16601 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
16602 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
16603 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
16604-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
16605+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
16606 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
16607 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
16608 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
16609@@ -188,8 +191,8 @@
16610 * bits are combined, this will alow user to access the high address mapped
16611 * VDSO in the presence of CONFIG_COMPAT_VDSO
16612 */
16613-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
16614-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
16615+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
16616+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
16617 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
16618 #endif
16619
16620@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
16621 {
16622 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
16623 }
16624+#endif
16625
16626+#if PAGETABLE_LEVELS == 3
16627+#include <asm-generic/pgtable-nopud.h>
16628+#endif
16629+
16630+#if PAGETABLE_LEVELS == 2
16631+#include <asm-generic/pgtable-nopmd.h>
16632+#endif
16633+
16634+#ifndef __ASSEMBLY__
16635 #if PAGETABLE_LEVELS > 3
16636 typedef struct { pudval_t pud; } pud_t;
16637
16638@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
16639 return pud.pud;
16640 }
16641 #else
16642-#include <asm-generic/pgtable-nopud.h>
16643-
16644 static inline pudval_t native_pud_val(pud_t pud)
16645 {
16646 return native_pgd_val(pud.pgd);
16647@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
16648 return pmd.pmd;
16649 }
16650 #else
16651-#include <asm-generic/pgtable-nopmd.h>
16652-
16653 static inline pmdval_t native_pmd_val(pmd_t pmd)
16654 {
16655 return native_pgd_val(pmd.pud.pgd);
16656@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
16657
16658 extern pteval_t __supported_pte_mask;
16659 extern void set_nx(void);
16660-extern int nx_enabled;
16661
16662 #define pgprot_writecombine pgprot_writecombine
16663 extern pgprot_t pgprot_writecombine(pgprot_t prot);
16664diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
16665index 22224b3..b3a2f90 100644
16666--- a/arch/x86/include/asm/processor.h
16667+++ b/arch/x86/include/asm/processor.h
16668@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
16669 : "memory");
16670 }
16671
16672+/* invpcid (%rdx),%rax */
16673+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
16674+
16675+#define INVPCID_SINGLE_ADDRESS 0UL
16676+#define INVPCID_SINGLE_CONTEXT 1UL
16677+#define INVPCID_ALL_GLOBAL 2UL
16678+#define INVPCID_ALL_MONGLOBAL 3UL
16679+
16680+#define PCID_KERNEL 0UL
16681+#define PCID_USER 1UL
16682+#define PCID_NOFLUSH (1UL << 63)
16683+
16684 static inline void load_cr3(pgd_t *pgdir)
16685 {
16686- write_cr3(__pa(pgdir));
16687+ write_cr3(__pa(pgdir) | PCID_KERNEL);
16688 }
16689
16690 #ifdef CONFIG_X86_32
16691@@ -282,7 +294,7 @@ struct tss_struct {
16692
16693 } ____cacheline_aligned;
16694
16695-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
16696+extern struct tss_struct init_tss[NR_CPUS];
16697
16698 /*
16699 * Save the original ist values for checking stack pointers during debugging
16700@@ -452,6 +464,7 @@ struct thread_struct {
16701 unsigned short ds;
16702 unsigned short fsindex;
16703 unsigned short gsindex;
16704+ unsigned short ss;
16705 #endif
16706 #ifdef CONFIG_X86_32
16707 unsigned long ip;
16708@@ -552,29 +565,8 @@ static inline void load_sp0(struct tss_struct *tss,
16709 extern unsigned long mmu_cr4_features;
16710 extern u32 *trampoline_cr4_features;
16711
16712-static inline void set_in_cr4(unsigned long mask)
16713-{
16714- unsigned long cr4;
16715-
16716- mmu_cr4_features |= mask;
16717- if (trampoline_cr4_features)
16718- *trampoline_cr4_features = mmu_cr4_features;
16719- cr4 = read_cr4();
16720- cr4 |= mask;
16721- write_cr4(cr4);
16722-}
16723-
16724-static inline void clear_in_cr4(unsigned long mask)
16725-{
16726- unsigned long cr4;
16727-
16728- mmu_cr4_features &= ~mask;
16729- if (trampoline_cr4_features)
16730- *trampoline_cr4_features = mmu_cr4_features;
16731- cr4 = read_cr4();
16732- cr4 &= ~mask;
16733- write_cr4(cr4);
16734-}
16735+extern void set_in_cr4(unsigned long mask);
16736+extern void clear_in_cr4(unsigned long mask);
16737
16738 typedef struct {
16739 unsigned long seg;
16740@@ -823,11 +815,18 @@ static inline void spin_lock_prefetch(const void *x)
16741 */
16742 #define TASK_SIZE PAGE_OFFSET
16743 #define TASK_SIZE_MAX TASK_SIZE
16744+
16745+#ifdef CONFIG_PAX_SEGMEXEC
16746+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
16747+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
16748+#else
16749 #define STACK_TOP TASK_SIZE
16750-#define STACK_TOP_MAX STACK_TOP
16751+#endif
16752+
16753+#define STACK_TOP_MAX TASK_SIZE
16754
16755 #define INIT_THREAD { \
16756- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16757+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16758 .vm86_info = NULL, \
16759 .sysenter_cs = __KERNEL_CS, \
16760 .io_bitmap_ptr = NULL, \
16761@@ -841,7 +840,7 @@ static inline void spin_lock_prefetch(const void *x)
16762 */
16763 #define INIT_TSS { \
16764 .x86_tss = { \
16765- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16766+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16767 .ss0 = __KERNEL_DS, \
16768 .ss1 = __KERNEL_CS, \
16769 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
16770@@ -852,11 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
16771 extern unsigned long thread_saved_pc(struct task_struct *tsk);
16772
16773 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
16774-#define KSTK_TOP(info) \
16775-({ \
16776- unsigned long *__ptr = (unsigned long *)(info); \
16777- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
16778-})
16779+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
16780
16781 /*
16782 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
16783@@ -871,7 +866,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16784 #define task_pt_regs(task) \
16785 ({ \
16786 struct pt_regs *__regs__; \
16787- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
16788+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
16789 __regs__ - 1; \
16790 })
16791
16792@@ -881,13 +876,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16793 /*
16794 * User space process size. 47bits minus one guard page.
16795 */
16796-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
16797+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
16798
16799 /* This decides where the kernel will search for a free chunk of vm
16800 * space during mmap's.
16801 */
16802 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
16803- 0xc0000000 : 0xFFFFe000)
16804+ 0xc0000000 : 0xFFFFf000)
16805
16806 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
16807 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
16808@@ -898,11 +893,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16809 #define STACK_TOP_MAX TASK_SIZE_MAX
16810
16811 #define INIT_THREAD { \
16812- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16813+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16814 }
16815
16816 #define INIT_TSS { \
16817- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16818+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16819 }
16820
16821 /*
16822@@ -930,6 +925,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
16823 */
16824 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
16825
16826+#ifdef CONFIG_PAX_SEGMEXEC
16827+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
16828+#endif
16829+
16830 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
16831
16832 /* Get/set a process' ability to use the timestamp counter instruction */
16833@@ -942,7 +941,8 @@ extern int set_tsc_mode(unsigned int val);
16834 extern u16 amd_get_nb_id(int cpu);
16835
16836 struct aperfmperf {
16837- u64 aperf, mperf;
16838+ u64 aperf __intentional_overflow(0);
16839+ u64 mperf __intentional_overflow(0);
16840 };
16841
16842 static inline void get_aperfmperf(struct aperfmperf *am)
16843@@ -970,7 +970,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
16844 return ratio;
16845 }
16846
16847-extern unsigned long arch_align_stack(unsigned long sp);
16848+#define arch_align_stack(x) ((x) & ~0xfUL)
16849 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
16850
16851 void default_idle(void);
16852@@ -980,6 +980,6 @@ bool xen_set_default_idle(void);
16853 #define xen_set_default_idle 0
16854 #endif
16855
16856-void stop_this_cpu(void *dummy);
16857+void stop_this_cpu(void *dummy) __noreturn;
16858
16859 #endif /* _ASM_X86_PROCESSOR_H */
16860diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
16861index 942a086..6c26446 100644
16862--- a/arch/x86/include/asm/ptrace.h
16863+++ b/arch/x86/include/asm/ptrace.h
16864@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
16865 }
16866
16867 /*
16868- * user_mode_vm(regs) determines whether a register set came from user mode.
16869+ * user_mode(regs) determines whether a register set came from user mode.
16870 * This is true if V8086 mode was enabled OR if the register set was from
16871 * protected mode with RPL-3 CS value. This tricky test checks that with
16872 * one comparison. Many places in the kernel can bypass this full check
16873- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
16874+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
16875+ * be used.
16876 */
16877-static inline int user_mode(struct pt_regs *regs)
16878+static inline int user_mode_novm(struct pt_regs *regs)
16879 {
16880 #ifdef CONFIG_X86_32
16881 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
16882 #else
16883- return !!(regs->cs & 3);
16884+ return !!(regs->cs & SEGMENT_RPL_MASK);
16885 #endif
16886 }
16887
16888-static inline int user_mode_vm(struct pt_regs *regs)
16889+static inline int user_mode(struct pt_regs *regs)
16890 {
16891 #ifdef CONFIG_X86_32
16892 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
16893 USER_RPL;
16894 #else
16895- return user_mode(regs);
16896+ return user_mode_novm(regs);
16897 #endif
16898 }
16899
16900@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
16901 #ifdef CONFIG_X86_64
16902 static inline bool user_64bit_mode(struct pt_regs *regs)
16903 {
16904+ unsigned long cs = regs->cs & 0xffff;
16905 #ifndef CONFIG_PARAVIRT
16906 /*
16907 * On non-paravirt systems, this is the only long mode CPL 3
16908 * selector. We do not allow long mode selectors in the LDT.
16909 */
16910- return regs->cs == __USER_CS;
16911+ return cs == __USER_CS;
16912 #else
16913 /* Headers are too twisted for this to go in paravirt.h. */
16914- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
16915+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
16916 #endif
16917 }
16918
16919@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
16920 * Traps from the kernel do not save sp and ss.
16921 * Use the helper function to retrieve sp.
16922 */
16923- if (offset == offsetof(struct pt_regs, sp) &&
16924- regs->cs == __KERNEL_CS)
16925- return kernel_stack_pointer(regs);
16926+ if (offset == offsetof(struct pt_regs, sp)) {
16927+ unsigned long cs = regs->cs & 0xffff;
16928+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
16929+ return kernel_stack_pointer(regs);
16930+ }
16931 #endif
16932 return *(unsigned long *)((unsigned long)regs + offset);
16933 }
16934diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
16935index 9c6b890..5305f53 100644
16936--- a/arch/x86/include/asm/realmode.h
16937+++ b/arch/x86/include/asm/realmode.h
16938@@ -22,16 +22,14 @@ struct real_mode_header {
16939 #endif
16940 /* APM/BIOS reboot */
16941 u32 machine_real_restart_asm;
16942-#ifdef CONFIG_X86_64
16943 u32 machine_real_restart_seg;
16944-#endif
16945 };
16946
16947 /* This must match data at trampoline_32/64.S */
16948 struct trampoline_header {
16949 #ifdef CONFIG_X86_32
16950 u32 start;
16951- u16 gdt_pad;
16952+ u16 boot_cs;
16953 u16 gdt_limit;
16954 u32 gdt_base;
16955 #else
16956diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
16957index a82c4f1..ac45053 100644
16958--- a/arch/x86/include/asm/reboot.h
16959+++ b/arch/x86/include/asm/reboot.h
16960@@ -6,13 +6,13 @@
16961 struct pt_regs;
16962
16963 struct machine_ops {
16964- void (*restart)(char *cmd);
16965- void (*halt)(void);
16966- void (*power_off)(void);
16967+ void (* __noreturn restart)(char *cmd);
16968+ void (* __noreturn halt)(void);
16969+ void (* __noreturn power_off)(void);
16970 void (*shutdown)(void);
16971 void (*crash_shutdown)(struct pt_regs *);
16972- void (*emergency_restart)(void);
16973-};
16974+ void (* __noreturn emergency_restart)(void);
16975+} __no_const;
16976
16977 extern struct machine_ops machine_ops;
16978
16979diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
16980index cad82c9..2e5c5c1 100644
16981--- a/arch/x86/include/asm/rwsem.h
16982+++ b/arch/x86/include/asm/rwsem.h
16983@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
16984 {
16985 asm volatile("# beginning down_read\n\t"
16986 LOCK_PREFIX _ASM_INC "(%1)\n\t"
16987+
16988+#ifdef CONFIG_PAX_REFCOUNT
16989+ "jno 0f\n"
16990+ LOCK_PREFIX _ASM_DEC "(%1)\n"
16991+ "int $4\n0:\n"
16992+ _ASM_EXTABLE(0b, 0b)
16993+#endif
16994+
16995 /* adds 0x00000001 */
16996 " jns 1f\n"
16997 " call call_rwsem_down_read_failed\n"
16998@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
16999 "1:\n\t"
17000 " mov %1,%2\n\t"
17001 " add %3,%2\n\t"
17002+
17003+#ifdef CONFIG_PAX_REFCOUNT
17004+ "jno 0f\n"
17005+ "sub %3,%2\n"
17006+ "int $4\n0:\n"
17007+ _ASM_EXTABLE(0b, 0b)
17008+#endif
17009+
17010 " jle 2f\n\t"
17011 LOCK_PREFIX " cmpxchg %2,%0\n\t"
17012 " jnz 1b\n\t"
17013@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
17014 long tmp;
17015 asm volatile("# beginning down_write\n\t"
17016 LOCK_PREFIX " xadd %1,(%2)\n\t"
17017+
17018+#ifdef CONFIG_PAX_REFCOUNT
17019+ "jno 0f\n"
17020+ "mov %1,(%2)\n"
17021+ "int $4\n0:\n"
17022+ _ASM_EXTABLE(0b, 0b)
17023+#endif
17024+
17025 /* adds 0xffff0001, returns the old value */
17026 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
17027 /* was the active mask 0 before? */
17028@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
17029 long tmp;
17030 asm volatile("# beginning __up_read\n\t"
17031 LOCK_PREFIX " xadd %1,(%2)\n\t"
17032+
17033+#ifdef CONFIG_PAX_REFCOUNT
17034+ "jno 0f\n"
17035+ "mov %1,(%2)\n"
17036+ "int $4\n0:\n"
17037+ _ASM_EXTABLE(0b, 0b)
17038+#endif
17039+
17040 /* subtracts 1, returns the old value */
17041 " jns 1f\n\t"
17042 " call call_rwsem_wake\n" /* expects old value in %edx */
17043@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
17044 long tmp;
17045 asm volatile("# beginning __up_write\n\t"
17046 LOCK_PREFIX " xadd %1,(%2)\n\t"
17047+
17048+#ifdef CONFIG_PAX_REFCOUNT
17049+ "jno 0f\n"
17050+ "mov %1,(%2)\n"
17051+ "int $4\n0:\n"
17052+ _ASM_EXTABLE(0b, 0b)
17053+#endif
17054+
17055 /* subtracts 0xffff0001, returns the old value */
17056 " jns 1f\n\t"
17057 " call call_rwsem_wake\n" /* expects old value in %edx */
17058@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17059 {
17060 asm volatile("# beginning __downgrade_write\n\t"
17061 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
17062+
17063+#ifdef CONFIG_PAX_REFCOUNT
17064+ "jno 0f\n"
17065+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
17066+ "int $4\n0:\n"
17067+ _ASM_EXTABLE(0b, 0b)
17068+#endif
17069+
17070 /*
17071 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
17072 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
17073@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17074 */
17075 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17076 {
17077- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
17078+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
17079+
17080+#ifdef CONFIG_PAX_REFCOUNT
17081+ "jno 0f\n"
17082+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
17083+ "int $4\n0:\n"
17084+ _ASM_EXTABLE(0b, 0b)
17085+#endif
17086+
17087 : "+m" (sem->count)
17088 : "er" (delta));
17089 }
17090@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17091 */
17092 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
17093 {
17094- return delta + xadd(&sem->count, delta);
17095+ return delta + xadd_check_overflow(&sem->count, delta);
17096 }
17097
17098 #endif /* __KERNEL__ */
17099diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
17100index c48a950..bc40804 100644
17101--- a/arch/x86/include/asm/segment.h
17102+++ b/arch/x86/include/asm/segment.h
17103@@ -64,10 +64,15 @@
17104 * 26 - ESPFIX small SS
17105 * 27 - per-cpu [ offset to per-cpu data area ]
17106 * 28 - stack_canary-20 [ for stack protector ]
17107- * 29 - unused
17108- * 30 - unused
17109+ * 29 - PCI BIOS CS
17110+ * 30 - PCI BIOS DS
17111 * 31 - TSS for double fault handler
17112 */
17113+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
17114+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
17115+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
17116+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
17117+
17118 #define GDT_ENTRY_TLS_MIN 6
17119 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
17120
17121@@ -79,6 +84,8 @@
17122
17123 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
17124
17125+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
17126+
17127 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
17128
17129 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
17130@@ -104,6 +111,12 @@
17131 #define __KERNEL_STACK_CANARY 0
17132 #endif
17133
17134+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
17135+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
17136+
17137+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
17138+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
17139+
17140 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
17141
17142 /*
17143@@ -141,7 +154,7 @@
17144 */
17145
17146 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
17147-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
17148+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
17149
17150
17151 #else
17152@@ -165,6 +178,8 @@
17153 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
17154 #define __USER32_DS __USER_DS
17155
17156+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
17157+
17158 #define GDT_ENTRY_TSS 8 /* needs two entries */
17159 #define GDT_ENTRY_LDT 10 /* needs two entries */
17160 #define GDT_ENTRY_TLS_MIN 12
17161@@ -173,6 +188,8 @@
17162 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
17163 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
17164
17165+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
17166+
17167 /* TLS indexes for 64bit - hardcoded in arch_prctl */
17168 #define FS_TLS 0
17169 #define GS_TLS 1
17170@@ -180,12 +197,14 @@
17171 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
17172 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
17173
17174-#define GDT_ENTRIES 16
17175+#define GDT_ENTRIES 17
17176
17177 #endif
17178
17179 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
17180+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
17181 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
17182+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
17183 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
17184 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
17185 #ifndef CONFIG_PARAVIRT
17186@@ -265,7 +284,7 @@ static inline unsigned long get_limit(unsigned long segment)
17187 {
17188 unsigned long __limit;
17189 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
17190- return __limit + 1;
17191+ return __limit;
17192 }
17193
17194 #endif /* !__ASSEMBLY__ */
17195diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
17196index 8d3120f..352b440 100644
17197--- a/arch/x86/include/asm/smap.h
17198+++ b/arch/x86/include/asm/smap.h
17199@@ -25,11 +25,40 @@
17200
17201 #include <asm/alternative-asm.h>
17202
17203+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17204+#define ASM_PAX_OPEN_USERLAND \
17205+ 661: jmp 663f; \
17206+ .pushsection .altinstr_replacement, "a" ; \
17207+ 662: pushq %rax; nop; \
17208+ .popsection ; \
17209+ .pushsection .altinstructions, "a" ; \
17210+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
17211+ .popsection ; \
17212+ call __pax_open_userland; \
17213+ popq %rax; \
17214+ 663:
17215+
17216+#define ASM_PAX_CLOSE_USERLAND \
17217+ 661: jmp 663f; \
17218+ .pushsection .altinstr_replacement, "a" ; \
17219+ 662: pushq %rax; nop; \
17220+ .popsection; \
17221+ .pushsection .altinstructions, "a" ; \
17222+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
17223+ .popsection; \
17224+ call __pax_close_userland; \
17225+ popq %rax; \
17226+ 663:
17227+#else
17228+#define ASM_PAX_OPEN_USERLAND
17229+#define ASM_PAX_CLOSE_USERLAND
17230+#endif
17231+
17232 #ifdef CONFIG_X86_SMAP
17233
17234 #define ASM_CLAC \
17235 661: ASM_NOP3 ; \
17236- .pushsection .altinstr_replacement, "ax" ; \
17237+ .pushsection .altinstr_replacement, "a" ; \
17238 662: __ASM_CLAC ; \
17239 .popsection ; \
17240 .pushsection .altinstructions, "a" ; \
17241@@ -38,7 +67,7 @@
17242
17243 #define ASM_STAC \
17244 661: ASM_NOP3 ; \
17245- .pushsection .altinstr_replacement, "ax" ; \
17246+ .pushsection .altinstr_replacement, "a" ; \
17247 662: __ASM_STAC ; \
17248 .popsection ; \
17249 .pushsection .altinstructions, "a" ; \
17250@@ -56,6 +85,37 @@
17251
17252 #include <asm/alternative.h>
17253
17254+#define __HAVE_ARCH_PAX_OPEN_USERLAND
17255+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
17256+
17257+extern void __pax_open_userland(void);
17258+static __always_inline unsigned long pax_open_userland(void)
17259+{
17260+
17261+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17262+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
17263+ :
17264+ : [open] "i" (__pax_open_userland)
17265+ : "memory", "rax");
17266+#endif
17267+
17268+ return 0;
17269+}
17270+
17271+extern void __pax_close_userland(void);
17272+static __always_inline unsigned long pax_close_userland(void)
17273+{
17274+
17275+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17276+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
17277+ :
17278+ : [close] "i" (__pax_close_userland)
17279+ : "memory", "rax");
17280+#endif
17281+
17282+ return 0;
17283+}
17284+
17285 #ifdef CONFIG_X86_SMAP
17286
17287 static __always_inline void clac(void)
17288diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
17289index b073aae..39f9bdd 100644
17290--- a/arch/x86/include/asm/smp.h
17291+++ b/arch/x86/include/asm/smp.h
17292@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
17293 /* cpus sharing the last level cache: */
17294 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
17295 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
17296-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
17297+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
17298
17299 static inline struct cpumask *cpu_sibling_mask(int cpu)
17300 {
17301@@ -79,7 +79,7 @@ struct smp_ops {
17302
17303 void (*send_call_func_ipi)(const struct cpumask *mask);
17304 void (*send_call_func_single_ipi)(int cpu);
17305-};
17306+} __no_const;
17307
17308 /* Globals due to paravirt */
17309 extern void set_cpu_sibling_map(int cpu);
17310@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
17311 extern int safe_smp_processor_id(void);
17312
17313 #elif defined(CONFIG_X86_64_SMP)
17314-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
17315-
17316-#define stack_smp_processor_id() \
17317-({ \
17318- struct thread_info *ti; \
17319- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
17320- ti->cpu; \
17321-})
17322+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
17323+#define stack_smp_processor_id() raw_smp_processor_id()
17324 #define safe_smp_processor_id() smp_processor_id()
17325
17326 #endif
17327diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
17328index 33692ea..350a534 100644
17329--- a/arch/x86/include/asm/spinlock.h
17330+++ b/arch/x86/include/asm/spinlock.h
17331@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
17332 static inline void arch_read_lock(arch_rwlock_t *rw)
17333 {
17334 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
17335+
17336+#ifdef CONFIG_PAX_REFCOUNT
17337+ "jno 0f\n"
17338+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
17339+ "int $4\n0:\n"
17340+ _ASM_EXTABLE(0b, 0b)
17341+#endif
17342+
17343 "jns 1f\n"
17344 "call __read_lock_failed\n\t"
17345 "1:\n"
17346@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
17347 static inline void arch_write_lock(arch_rwlock_t *rw)
17348 {
17349 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
17350+
17351+#ifdef CONFIG_PAX_REFCOUNT
17352+ "jno 0f\n"
17353+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
17354+ "int $4\n0:\n"
17355+ _ASM_EXTABLE(0b, 0b)
17356+#endif
17357+
17358 "jz 1f\n"
17359 "call __write_lock_failed\n\t"
17360 "1:\n"
17361@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
17362
17363 static inline void arch_read_unlock(arch_rwlock_t *rw)
17364 {
17365- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
17366+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
17367+
17368+#ifdef CONFIG_PAX_REFCOUNT
17369+ "jno 0f\n"
17370+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
17371+ "int $4\n0:\n"
17372+ _ASM_EXTABLE(0b, 0b)
17373+#endif
17374+
17375 :"+m" (rw->lock) : : "memory");
17376 }
17377
17378 static inline void arch_write_unlock(arch_rwlock_t *rw)
17379 {
17380- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
17381+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
17382+
17383+#ifdef CONFIG_PAX_REFCOUNT
17384+ "jno 0f\n"
17385+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
17386+ "int $4\n0:\n"
17387+ _ASM_EXTABLE(0b, 0b)
17388+#endif
17389+
17390 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
17391 }
17392
17393diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
17394index 6a99859..03cb807 100644
17395--- a/arch/x86/include/asm/stackprotector.h
17396+++ b/arch/x86/include/asm/stackprotector.h
17397@@ -47,7 +47,7 @@
17398 * head_32 for boot CPU and setup_per_cpu_areas() for others.
17399 */
17400 #define GDT_STACK_CANARY_INIT \
17401- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
17402+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
17403
17404 /*
17405 * Initialize the stackprotector canary value.
17406@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
17407
17408 static inline void load_stack_canary_segment(void)
17409 {
17410-#ifdef CONFIG_X86_32
17411+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
17412 asm volatile ("mov %0, %%gs" : : "r" (0));
17413 #endif
17414 }
17415diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
17416index 70bbe39..4ae2bd4 100644
17417--- a/arch/x86/include/asm/stacktrace.h
17418+++ b/arch/x86/include/asm/stacktrace.h
17419@@ -11,28 +11,20 @@
17420
17421 extern int kstack_depth_to_print;
17422
17423-struct thread_info;
17424+struct task_struct;
17425 struct stacktrace_ops;
17426
17427-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
17428- unsigned long *stack,
17429- unsigned long bp,
17430- const struct stacktrace_ops *ops,
17431- void *data,
17432- unsigned long *end,
17433- int *graph);
17434+typedef unsigned long walk_stack_t(struct task_struct *task,
17435+ void *stack_start,
17436+ unsigned long *stack,
17437+ unsigned long bp,
17438+ const struct stacktrace_ops *ops,
17439+ void *data,
17440+ unsigned long *end,
17441+ int *graph);
17442
17443-extern unsigned long
17444-print_context_stack(struct thread_info *tinfo,
17445- unsigned long *stack, unsigned long bp,
17446- const struct stacktrace_ops *ops, void *data,
17447- unsigned long *end, int *graph);
17448-
17449-extern unsigned long
17450-print_context_stack_bp(struct thread_info *tinfo,
17451- unsigned long *stack, unsigned long bp,
17452- const struct stacktrace_ops *ops, void *data,
17453- unsigned long *end, int *graph);
17454+extern walk_stack_t print_context_stack;
17455+extern walk_stack_t print_context_stack_bp;
17456
17457 /* Generic stack tracer with callbacks */
17458
17459@@ -40,7 +32,7 @@ struct stacktrace_ops {
17460 void (*address)(void *data, unsigned long address, int reliable);
17461 /* On negative return stop dumping */
17462 int (*stack)(void *data, char *name);
17463- walk_stack_t walk_stack;
17464+ walk_stack_t *walk_stack;
17465 };
17466
17467 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
17468diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
17469index 4ec45b3..a4f0a8a 100644
17470--- a/arch/x86/include/asm/switch_to.h
17471+++ b/arch/x86/include/asm/switch_to.h
17472@@ -108,7 +108,7 @@ do { \
17473 "call __switch_to\n\t" \
17474 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
17475 __switch_canary \
17476- "movq %P[thread_info](%%rsi),%%r8\n\t" \
17477+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
17478 "movq %%rax,%%rdi\n\t" \
17479 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
17480 "jnz ret_from_fork\n\t" \
17481@@ -119,7 +119,7 @@ do { \
17482 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
17483 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
17484 [_tif_fork] "i" (_TIF_FORK), \
17485- [thread_info] "i" (offsetof(struct task_struct, stack)), \
17486+ [thread_info] "m" (current_tinfo), \
17487 [current_task] "m" (current_task) \
17488 __switch_canary_iparam \
17489 : "memory", "cc" __EXTRA_CLOBBER)
17490diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
17491index a1df6e8..e002940 100644
17492--- a/arch/x86/include/asm/thread_info.h
17493+++ b/arch/x86/include/asm/thread_info.h
17494@@ -10,6 +10,7 @@
17495 #include <linux/compiler.h>
17496 #include <asm/page.h>
17497 #include <asm/types.h>
17498+#include <asm/percpu.h>
17499
17500 /*
17501 * low level task data that entry.S needs immediate access to
17502@@ -23,7 +24,6 @@ struct exec_domain;
17503 #include <linux/atomic.h>
17504
17505 struct thread_info {
17506- struct task_struct *task; /* main task structure */
17507 struct exec_domain *exec_domain; /* execution domain */
17508 __u32 flags; /* low level flags */
17509 __u32 status; /* thread synchronous flags */
17510@@ -33,19 +33,13 @@ struct thread_info {
17511 mm_segment_t addr_limit;
17512 struct restart_block restart_block;
17513 void __user *sysenter_return;
17514-#ifdef CONFIG_X86_32
17515- unsigned long previous_esp; /* ESP of the previous stack in
17516- case of nested (IRQ) stacks
17517- */
17518- __u8 supervisor_stack[0];
17519-#endif
17520+ unsigned long lowest_stack;
17521 unsigned int sig_on_uaccess_error:1;
17522 unsigned int uaccess_err:1; /* uaccess failed */
17523 };
17524
17525-#define INIT_THREAD_INFO(tsk) \
17526+#define INIT_THREAD_INFO \
17527 { \
17528- .task = &tsk, \
17529 .exec_domain = &default_exec_domain, \
17530 .flags = 0, \
17531 .cpu = 0, \
17532@@ -56,7 +50,7 @@ struct thread_info {
17533 }, \
17534 }
17535
17536-#define init_thread_info (init_thread_union.thread_info)
17537+#define init_thread_info (init_thread_union.stack)
17538 #define init_stack (init_thread_union.stack)
17539
17540 #else /* !__ASSEMBLY__ */
17541@@ -97,6 +91,7 @@ struct thread_info {
17542 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
17543 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
17544 #define TIF_X32 30 /* 32-bit native x86-64 binary */
17545+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
17546
17547 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
17548 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
17549@@ -121,17 +116,18 @@ struct thread_info {
17550 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
17551 #define _TIF_ADDR32 (1 << TIF_ADDR32)
17552 #define _TIF_X32 (1 << TIF_X32)
17553+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
17554
17555 /* work to do in syscall_trace_enter() */
17556 #define _TIF_WORK_SYSCALL_ENTRY \
17557 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
17558 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
17559- _TIF_NOHZ)
17560+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
17561
17562 /* work to do in syscall_trace_leave() */
17563 #define _TIF_WORK_SYSCALL_EXIT \
17564 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
17565- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
17566+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
17567
17568 /* work to do on interrupt/exception return */
17569 #define _TIF_WORK_MASK \
17570@@ -142,7 +138,7 @@ struct thread_info {
17571 /* work to do on any return to user space */
17572 #define _TIF_ALLWORK_MASK \
17573 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
17574- _TIF_NOHZ)
17575+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
17576
17577 /* Only used for 64 bit */
17578 #define _TIF_DO_NOTIFY_MASK \
17579@@ -158,45 +154,40 @@ struct thread_info {
17580
17581 #define PREEMPT_ACTIVE 0x10000000
17582
17583-#ifdef CONFIG_X86_32
17584-
17585-#define STACK_WARN (THREAD_SIZE/8)
17586-/*
17587- * macros/functions for gaining access to the thread information structure
17588- *
17589- * preempt_count needs to be 1 initially, until the scheduler is functional.
17590- */
17591-#ifndef __ASSEMBLY__
17592-
17593-
17594-/* how to get the current stack pointer from C */
17595-register unsigned long current_stack_pointer asm("esp") __used;
17596-
17597-/* how to get the thread information struct from C */
17598-static inline struct thread_info *current_thread_info(void)
17599-{
17600- return (struct thread_info *)
17601- (current_stack_pointer & ~(THREAD_SIZE - 1));
17602-}
17603-
17604-#else /* !__ASSEMBLY__ */
17605-
17606+#ifdef __ASSEMBLY__
17607 /* how to get the thread information struct from ASM */
17608 #define GET_THREAD_INFO(reg) \
17609- movl $-THREAD_SIZE, reg; \
17610- andl %esp, reg
17611+ mov PER_CPU_VAR(current_tinfo), reg
17612
17613 /* use this one if reg already contains %esp */
17614-#define GET_THREAD_INFO_WITH_ESP(reg) \
17615- andl $-THREAD_SIZE, reg
17616+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
17617+#else
17618+/* how to get the thread information struct from C */
17619+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
17620+
17621+static __always_inline struct thread_info *current_thread_info(void)
17622+{
17623+ return this_cpu_read_stable(current_tinfo);
17624+}
17625+#endif
17626+
17627+#ifdef CONFIG_X86_32
17628+
17629+#define STACK_WARN (THREAD_SIZE/8)
17630+/*
17631+ * macros/functions for gaining access to the thread information structure
17632+ *
17633+ * preempt_count needs to be 1 initially, until the scheduler is functional.
17634+ */
17635+#ifndef __ASSEMBLY__
17636+
17637+/* how to get the current stack pointer from C */
17638+register unsigned long current_stack_pointer asm("esp") __used;
17639
17640 #endif
17641
17642 #else /* X86_32 */
17643
17644-#include <asm/percpu.h>
17645-#define KERNEL_STACK_OFFSET (5*8)
17646-
17647 /*
17648 * macros/functions for gaining access to the thread information structure
17649 * preempt_count needs to be 1 initially, until the scheduler is functional.
17650@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
17651 #ifndef __ASSEMBLY__
17652 DECLARE_PER_CPU(unsigned long, kernel_stack);
17653
17654-static inline struct thread_info *current_thread_info(void)
17655-{
17656- struct thread_info *ti;
17657- ti = (void *)(this_cpu_read_stable(kernel_stack) +
17658- KERNEL_STACK_OFFSET - THREAD_SIZE);
17659- return ti;
17660-}
17661-
17662-#else /* !__ASSEMBLY__ */
17663-
17664-/* how to get the thread information struct from ASM */
17665-#define GET_THREAD_INFO(reg) \
17666- movq PER_CPU_VAR(kernel_stack),reg ; \
17667- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
17668-
17669-/*
17670- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
17671- * a certain register (to be used in assembler memory operands).
17672- */
17673-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
17674-
17675+/* how to get the current stack pointer from C */
17676+register unsigned long current_stack_pointer asm("rsp") __used;
17677 #endif
17678
17679 #endif /* !X86_32 */
17680@@ -283,5 +255,12 @@ static inline bool is_ia32_task(void)
17681 extern void arch_task_cache_init(void);
17682 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
17683 extern void arch_release_task_struct(struct task_struct *tsk);
17684+
17685+#define __HAVE_THREAD_FUNCTIONS
17686+#define task_thread_info(task) (&(task)->tinfo)
17687+#define task_stack_page(task) ((task)->stack)
17688+#define setup_thread_stack(p, org) do {} while (0)
17689+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
17690+
17691 #endif
17692 #endif /* _ASM_X86_THREAD_INFO_H */
17693diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
e2b79cd1 17694index 50a7fc0..45844c0 100644
bb5f0bf8
AF
17695--- a/arch/x86/include/asm/tlbflush.h
17696+++ b/arch/x86/include/asm/tlbflush.h
e2b79cd1 17697@@ -17,18 +17,44 @@
bb5f0bf8
AF
17698
17699 static inline void __native_flush_tlb(void)
17700 {
17701+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17702+ unsigned long descriptor[2];
e2b79cd1
AF
17703+
17704+ descriptor[0] = PCID_KERNEL;
bb5f0bf8
AF
17705+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
17706+ return;
17707+ }
17708+
17709+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17710+ if (static_cpu_has(X86_FEATURE_PCID)) {
17711+ unsigned int cpu = raw_get_cpu();
17712+
17713+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17714+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17715+ raw_put_cpu_no_resched();
17716+ return;
17717+ }
17718+#endif
17719+
17720 native_write_cr3(native_read_cr3());
17721 }
17722
17723 static inline void __native_flush_tlb_global_irq_disabled(void)
17724 {
17725- unsigned long cr4;
17726+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17727+ unsigned long descriptor[2];
bb5f0bf8
AF
17728
17729- cr4 = native_read_cr4();
17730- /* clear PGE */
17731- native_write_cr4(cr4 & ~X86_CR4_PGE);
17732- /* write old PGE again and flush TLBs */
17733- native_write_cr4(cr4);
e2b79cd1
AF
17734+ descriptor[0] = PCID_KERNEL;
17735+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
17736+ } else {
17737+ unsigned long cr4;
17738+
bb5f0bf8
AF
17739+ cr4 = native_read_cr4();
17740+ /* clear PGE */
17741+ native_write_cr4(cr4 & ~X86_CR4_PGE);
17742+ /* write old PGE again and flush TLBs */
17743+ native_write_cr4(cr4);
17744+ }
17745 }
17746
17747 static inline void __native_flush_tlb_global(void)
e2b79cd1 17748@@ -49,6 +75,42 @@ static inline void __native_flush_tlb_global(void)
bb5f0bf8
AF
17749
17750 static inline void __native_flush_tlb_single(unsigned long addr)
17751 {
17752+
17753+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17754+ unsigned long descriptor[2];
17755+
17756+ descriptor[0] = PCID_KERNEL;
17757+ descriptor[1] = addr;
17758+
17759+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17760+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
17761+ if (addr < TASK_SIZE_MAX)
17762+ descriptor[1] += pax_user_shadow_base;
17763+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17764+ }
17765+
17766+ descriptor[0] = PCID_USER;
17767+ descriptor[1] = addr;
17768+#endif
17769+
17770+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17771+ return;
17772+ }
17773+
17774+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17775+ if (static_cpu_has(X86_FEATURE_PCID)) {
17776+ unsigned int cpu = raw_get_cpu();
17777+
17778+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
17779+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17780+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17781+ raw_put_cpu_no_resched();
17782+
17783+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
17784+ addr += pax_user_shadow_base;
17785+ }
17786+#endif
17787+
17788 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17789 }
17790
17791diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
17792index 5ee2687..74590b9 100644
17793--- a/arch/x86/include/asm/uaccess.h
17794+++ b/arch/x86/include/asm/uaccess.h
17795@@ -7,6 +7,7 @@
17796 #include <linux/compiler.h>
17797 #include <linux/thread_info.h>
17798 #include <linux/string.h>
17799+#include <linux/sched.h>
17800 #include <asm/asm.h>
17801 #include <asm/page.h>
17802 #include <asm/smap.h>
17803@@ -29,7 +30,12 @@
17804
17805 #define get_ds() (KERNEL_DS)
17806 #define get_fs() (current_thread_info()->addr_limit)
17807+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17808+void __set_fs(mm_segment_t x);
17809+void set_fs(mm_segment_t x);
17810+#else
17811 #define set_fs(x) (current_thread_info()->addr_limit = (x))
17812+#endif
17813
17814 #define segment_eq(a, b) ((a).seg == (b).seg)
17815
17816@@ -77,8 +83,33 @@
17817 * checks that the pointer is in the user space range - after calling
17818 * this function, memory access functions may still return -EFAULT.
17819 */
17820-#define access_ok(type, addr, size) \
17821- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17822+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17823+#define access_ok(type, addr, size) \
17824+({ \
17825+ long __size = size; \
17826+ unsigned long __addr = (unsigned long)addr; \
17827+ unsigned long __addr_ao = __addr & PAGE_MASK; \
17828+ unsigned long __end_ao = __addr + __size - 1; \
17829+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
17830+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
17831+ while(__addr_ao <= __end_ao) { \
17832+ char __c_ao; \
17833+ __addr_ao += PAGE_SIZE; \
17834+ if (__size > PAGE_SIZE) \
17835+ cond_resched(); \
17836+ if (__get_user(__c_ao, (char __user *)__addr)) \
17837+ break; \
17838+ if (type != VERIFY_WRITE) { \
17839+ __addr = __addr_ao; \
17840+ continue; \
17841+ } \
17842+ if (__put_user(__c_ao, (char __user *)__addr)) \
17843+ break; \
17844+ __addr = __addr_ao; \
17845+ } \
17846+ } \
17847+ __ret_ao; \
17848+})
17849
17850 /*
17851 * The exception table consists of pairs of addresses relative to the
17852@@ -165,10 +196,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17853 register __inttype(*(ptr)) __val_gu asm("%edx"); \
17854 __chk_user_ptr(ptr); \
17855 might_fault(); \
17856+ pax_open_userland(); \
17857 asm volatile("call __get_user_%P3" \
17858 : "=a" (__ret_gu), "=r" (__val_gu) \
17859 : "0" (ptr), "i" (sizeof(*(ptr)))); \
17860 (x) = (__typeof__(*(ptr))) __val_gu; \
17861+ pax_close_userland(); \
17862 __ret_gu; \
17863 })
17864
17865@@ -176,13 +209,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17866 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
17867 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
17868
17869-
17870+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17871+#define __copyuser_seg "gs;"
17872+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
17873+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
17874+#else
17875+#define __copyuser_seg
17876+#define __COPYUSER_SET_ES
17877+#define __COPYUSER_RESTORE_ES
17878+#endif
17879
17880 #ifdef CONFIG_X86_32
17881 #define __put_user_asm_u64(x, addr, err, errret) \
17882 asm volatile(ASM_STAC "\n" \
17883- "1: movl %%eax,0(%2)\n" \
17884- "2: movl %%edx,4(%2)\n" \
17885+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
17886+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
17887 "3: " ASM_CLAC "\n" \
17888 ".section .fixup,\"ax\"\n" \
17889 "4: movl %3,%0\n" \
17890@@ -195,8 +236,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17891
17892 #define __put_user_asm_ex_u64(x, addr) \
17893 asm volatile(ASM_STAC "\n" \
17894- "1: movl %%eax,0(%1)\n" \
17895- "2: movl %%edx,4(%1)\n" \
17896+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
17897+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
17898 "3: " ASM_CLAC "\n" \
17899 _ASM_EXTABLE_EX(1b, 2b) \
17900 _ASM_EXTABLE_EX(2b, 3b) \
17901@@ -246,7 +287,8 @@ extern void __put_user_8(void);
17902 __typeof__(*(ptr)) __pu_val; \
17903 __chk_user_ptr(ptr); \
17904 might_fault(); \
17905- __pu_val = x; \
17906+ __pu_val = (x); \
17907+ pax_open_userland(); \
17908 switch (sizeof(*(ptr))) { \
17909 case 1: \
17910 __put_user_x(1, __pu_val, ptr, __ret_pu); \
17911@@ -264,6 +306,7 @@ extern void __put_user_8(void);
17912 __put_user_x(X, __pu_val, ptr, __ret_pu); \
17913 break; \
17914 } \
17915+ pax_close_userland(); \
17916 __ret_pu; \
17917 })
17918
17919@@ -344,8 +387,10 @@ do { \
17920 } while (0)
17921
17922 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17923+do { \
17924+ pax_open_userland(); \
17925 asm volatile(ASM_STAC "\n" \
17926- "1: mov"itype" %2,%"rtype"1\n" \
17927+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
17928 "2: " ASM_CLAC "\n" \
17929 ".section .fixup,\"ax\"\n" \
17930 "3: mov %3,%0\n" \
17931@@ -353,8 +398,10 @@ do { \
17932 " jmp 2b\n" \
17933 ".previous\n" \
17934 _ASM_EXTABLE(1b, 3b) \
17935- : "=r" (err), ltype(x) \
17936- : "m" (__m(addr)), "i" (errret), "0" (err))
17937+ : "=r" (err), ltype (x) \
17938+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
17939+ pax_close_userland(); \
17940+} while (0)
17941
17942 #define __get_user_size_ex(x, ptr, size) \
17943 do { \
17944@@ -378,7 +425,7 @@ do { \
17945 } while (0)
17946
17947 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
17948- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
17949+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
17950 "2:\n" \
17951 _ASM_EXTABLE_EX(1b, 2b) \
17952 : ltype(x) : "m" (__m(addr)))
17953@@ -395,13 +442,24 @@ do { \
17954 int __gu_err; \
17955 unsigned long __gu_val; \
17956 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
17957- (x) = (__force __typeof__(*(ptr)))__gu_val; \
17958+ (x) = (__typeof__(*(ptr)))__gu_val; \
17959 __gu_err; \
17960 })
17961
17962 /* FIXME: this hack is definitely wrong -AK */
17963 struct __large_struct { unsigned long buf[100]; };
17964-#define __m(x) (*(struct __large_struct __user *)(x))
17965+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17966+#define ____m(x) \
17967+({ \
17968+ unsigned long ____x = (unsigned long)(x); \
17969+ if (____x < pax_user_shadow_base) \
17970+ ____x += pax_user_shadow_base; \
17971+ (typeof(x))____x; \
17972+})
17973+#else
17974+#define ____m(x) (x)
17975+#endif
17976+#define __m(x) (*(struct __large_struct __user *)____m(x))
17977
17978 /*
17979 * Tell gcc we read from memory instead of writing: this is because
17980@@ -409,8 +467,10 @@ struct __large_struct { unsigned long buf[100]; };
17981 * aliasing issues.
17982 */
17983 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17984+do { \
17985+ pax_open_userland(); \
17986 asm volatile(ASM_STAC "\n" \
17987- "1: mov"itype" %"rtype"1,%2\n" \
17988+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
17989 "2: " ASM_CLAC "\n" \
17990 ".section .fixup,\"ax\"\n" \
17991 "3: mov %3,%0\n" \
17992@@ -418,10 +478,12 @@ struct __large_struct { unsigned long buf[100]; };
17993 ".previous\n" \
17994 _ASM_EXTABLE(1b, 3b) \
17995 : "=r"(err) \
17996- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
17997+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
17998+ pax_close_userland(); \
17999+} while (0)
18000
18001 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
18002- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
18003+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
18004 "2:\n" \
18005 _ASM_EXTABLE_EX(1b, 2b) \
18006 : : ltype(x), "m" (__m(addr)))
18007@@ -431,11 +493,13 @@ struct __large_struct { unsigned long buf[100]; };
18008 */
18009 #define uaccess_try do { \
18010 current_thread_info()->uaccess_err = 0; \
18011+ pax_open_userland(); \
18012 stac(); \
18013 barrier();
18014
18015 #define uaccess_catch(err) \
18016 clac(); \
18017+ pax_close_userland(); \
18018 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
18019 } while (0)
18020
18021@@ -460,8 +524,12 @@ struct __large_struct { unsigned long buf[100]; };
18022 * On error, the variable @x is set to zero.
18023 */
18024
18025+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18026+#define __get_user(x, ptr) get_user((x), (ptr))
18027+#else
18028 #define __get_user(x, ptr) \
18029 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
18030+#endif
18031
18032 /**
18033 * __put_user: - Write a simple value into user space, with less checking.
18034@@ -483,8 +551,12 @@ struct __large_struct { unsigned long buf[100]; };
18035 * Returns zero on success, or -EFAULT on error.
18036 */
18037
18038+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18039+#define __put_user(x, ptr) put_user((x), (ptr))
18040+#else
18041 #define __put_user(x, ptr) \
18042 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
18043+#endif
18044
18045 #define __get_user_unaligned __get_user
18046 #define __put_user_unaligned __put_user
18047@@ -502,7 +574,7 @@ struct __large_struct { unsigned long buf[100]; };
18048 #define get_user_ex(x, ptr) do { \
18049 unsigned long __gue_val; \
18050 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
18051- (x) = (__force __typeof__(*(ptr)))__gue_val; \
18052+ (x) = (__typeof__(*(ptr)))__gue_val; \
18053 } while (0)
18054
18055 #define put_user_try uaccess_try
18056@@ -519,8 +591,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
18057 extern __must_check long strlen_user(const char __user *str);
18058 extern __must_check long strnlen_user(const char __user *str, long n);
18059
18060-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
18061-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
18062+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
18063+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
18064
18065 /*
18066 * movsl can be slow when source and dest are not both 8-byte aligned
18067diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
18068index 7f760a9..04b1c65 100644
18069--- a/arch/x86/include/asm/uaccess_32.h
18070+++ b/arch/x86/include/asm/uaccess_32.h
18071@@ -11,15 +11,15 @@
18072 #include <asm/page.h>
18073
18074 unsigned long __must_check __copy_to_user_ll
18075- (void __user *to, const void *from, unsigned long n);
18076+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
18077 unsigned long __must_check __copy_from_user_ll
18078- (void *to, const void __user *from, unsigned long n);
18079+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18080 unsigned long __must_check __copy_from_user_ll_nozero
18081- (void *to, const void __user *from, unsigned long n);
18082+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18083 unsigned long __must_check __copy_from_user_ll_nocache
18084- (void *to, const void __user *from, unsigned long n);
18085+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18086 unsigned long __must_check __copy_from_user_ll_nocache_nozero
18087- (void *to, const void __user *from, unsigned long n);
18088+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18089
18090 /**
18091 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
18092@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
18093 static __always_inline unsigned long __must_check
18094 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
18095 {
18096+ if ((long)n < 0)
18097+ return n;
18098+
18099+ check_object_size(from, n, true);
18100+
18101 if (__builtin_constant_p(n)) {
18102 unsigned long ret;
18103
18104@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
18105 __copy_to_user(void __user *to, const void *from, unsigned long n)
18106 {
18107 might_fault();
18108+
18109 return __copy_to_user_inatomic(to, from, n);
18110 }
18111
18112 static __always_inline unsigned long
18113 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
18114 {
18115+ if ((long)n < 0)
18116+ return n;
18117+
18118 /* Avoid zeroing the tail if the copy fails..
18119 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
18120 * but as the zeroing behaviour is only significant when n is not
18121@@ -137,6 +146,12 @@ static __always_inline unsigned long
18122 __copy_from_user(void *to, const void __user *from, unsigned long n)
18123 {
18124 might_fault();
18125+
18126+ if ((long)n < 0)
18127+ return n;
18128+
18129+ check_object_size(to, n, false);
18130+
18131 if (__builtin_constant_p(n)) {
18132 unsigned long ret;
18133
18134@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
18135 const void __user *from, unsigned long n)
18136 {
18137 might_fault();
18138+
18139+ if ((long)n < 0)
18140+ return n;
18141+
18142 if (__builtin_constant_p(n)) {
18143 unsigned long ret;
18144
18145@@ -181,15 +200,19 @@ static __always_inline unsigned long
18146 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
18147 unsigned long n)
18148 {
18149- return __copy_from_user_ll_nocache_nozero(to, from, n);
18150+ if ((long)n < 0)
18151+ return n;
18152+
18153+ return __copy_from_user_ll_nocache_nozero(to, from, n);
18154 }
18155
18156-unsigned long __must_check copy_to_user(void __user *to,
18157- const void *from, unsigned long n);
18158-unsigned long __must_check _copy_from_user(void *to,
18159- const void __user *from,
18160- unsigned long n);
18161-
18162+extern void copy_to_user_overflow(void)
18163+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18164+ __compiletime_error("copy_to_user() buffer size is not provably correct")
18165+#else
18166+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
18167+#endif
18168+;
18169
18170 extern void copy_from_user_overflow(void)
18171 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18172@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
18173 #endif
18174 ;
18175
18176-static inline unsigned long __must_check copy_from_user(void *to,
18177- const void __user *from,
18178- unsigned long n)
18179+/**
18180+ * copy_to_user: - Copy a block of data into user space.
18181+ * @to: Destination address, in user space.
18182+ * @from: Source address, in kernel space.
18183+ * @n: Number of bytes to copy.
18184+ *
18185+ * Context: User context only. This function may sleep.
18186+ *
18187+ * Copy data from kernel space to user space.
18188+ *
18189+ * Returns number of bytes that could not be copied.
18190+ * On success, this will be zero.
18191+ */
18192+static inline unsigned long __must_check
18193+copy_to_user(void __user *to, const void *from, unsigned long n)
18194 {
18195- int sz = __compiletime_object_size(to);
18196+ size_t sz = __compiletime_object_size(from);
18197
18198- if (likely(sz == -1 || sz >= n))
18199- n = _copy_from_user(to, from, n);
18200- else
18201+ if (unlikely(sz != (size_t)-1 && sz < n))
18202+ copy_to_user_overflow();
18203+ else if (access_ok(VERIFY_WRITE, to, n))
18204+ n = __copy_to_user(to, from, n);
18205+ return n;
18206+}
18207+
18208+/**
18209+ * copy_from_user: - Copy a block of data from user space.
18210+ * @to: Destination address, in kernel space.
18211+ * @from: Source address, in user space.
18212+ * @n: Number of bytes to copy.
18213+ *
18214+ * Context: User context only. This function may sleep.
18215+ *
18216+ * Copy data from user space to kernel space.
18217+ *
18218+ * Returns number of bytes that could not be copied.
18219+ * On success, this will be zero.
18220+ *
18221+ * If some data could not be copied, this function will pad the copied
18222+ * data to the requested size using zero bytes.
18223+ */
18224+static inline unsigned long __must_check
18225+copy_from_user(void *to, const void __user *from, unsigned long n)
18226+{
18227+ size_t sz = __compiletime_object_size(to);
18228+
18229+ check_object_size(to, n, false);
18230+
18231+ if (unlikely(sz != (size_t)-1 && sz < n))
18232 copy_from_user_overflow();
18233-
18234+ else if (access_ok(VERIFY_READ, from, n))
18235+ n = __copy_from_user(to, from, n);
18236+ else if ((long)n > 0)
18237+ memset(to, 0, n);
18238 return n;
18239 }
18240
18241diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
e2b79cd1 18242index 142810c..1dbe82f 100644
bb5f0bf8
AF
18243--- a/arch/x86/include/asm/uaccess_64.h
18244+++ b/arch/x86/include/asm/uaccess_64.h
18245@@ -10,6 +10,9 @@
18246 #include <asm/alternative.h>
18247 #include <asm/cpufeature.h>
18248 #include <asm/page.h>
18249+#include <asm/pgtable.h>
18250+
18251+#define set_fs(x) (current_thread_info()->addr_limit = (x))
18252
18253 /*
18254 * Copy To/From Userspace
18255@@ -17,13 +20,13 @@
18256
18257 /* Handles exceptions in both to and from, but doesn't do access_ok */
18258 __must_check unsigned long
18259-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
18260+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
18261 __must_check unsigned long
18262-copy_user_generic_string(void *to, const void *from, unsigned len);
18263+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
18264 __must_check unsigned long
18265-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
18266+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
18267
18268-static __always_inline __must_check unsigned long
18269+static __always_inline __must_check __size_overflow(3) unsigned long
18270 copy_user_generic(void *to, const void *from, unsigned len)
18271 {
18272 unsigned ret;
18273@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
18274 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
18275 "=d" (len)),
18276 "1" (to), "2" (from), "3" (len)
18277- : "memory", "rcx", "r8", "r9", "r10", "r11");
18278+ : "memory", "rcx", "r8", "r9", "r11");
18279 return ret;
18280 }
18281
18282+static __always_inline __must_check unsigned long
18283+__copy_to_user(void __user *to, const void *from, unsigned long len);
18284+static __always_inline __must_check unsigned long
18285+__copy_from_user(void *to, const void __user *from, unsigned long len);
18286 __must_check unsigned long
18287-_copy_to_user(void __user *to, const void *from, unsigned len);
18288-__must_check unsigned long
18289-_copy_from_user(void *to, const void __user *from, unsigned len);
18290-__must_check unsigned long
18291-copy_in_user(void __user *to, const void __user *from, unsigned len);
18292+copy_in_user(void __user *to, const void __user *from, unsigned long len);
18293+
18294+extern void copy_to_user_overflow(void)
18295+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18296+ __compiletime_error("copy_to_user() buffer size is not provably correct")
18297+#else
18298+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
18299+#endif
18300+;
18301+
18302+extern void copy_from_user_overflow(void)
18303+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18304+ __compiletime_error("copy_from_user() buffer size is not provably correct")
18305+#else
18306+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
18307+#endif
18308+;
18309
18310 static inline unsigned long __must_check copy_from_user(void *to,
18311 const void __user *from,
18312 unsigned long n)
18313 {
18314- int sz = __compiletime_object_size(to);
18315-
18316 might_fault();
18317- if (likely(sz == -1 || sz >= n))
18318- n = _copy_from_user(to, from, n);
18319-#ifdef CONFIG_DEBUG_VM
18320- else
18321- WARN(1, "Buffer overflow detected!\n");
18322-#endif
18323+
18324+ check_object_size(to, n, false);
18325+
18326+ if (access_ok(VERIFY_READ, from, n))
18327+ n = __copy_from_user(to, from, n);
18328+ else if (n < INT_MAX)
18329+ memset(to, 0, n);
18330 return n;
18331 }
18332
18333 static __always_inline __must_check
18334-int copy_to_user(void __user *dst, const void *src, unsigned size)
18335+int copy_to_user(void __user *dst, const void *src, unsigned long size)
18336 {
18337 might_fault();
18338
18339- return _copy_to_user(dst, src, size);
18340+ if (access_ok(VERIFY_WRITE, dst, size))
18341+ size = __copy_to_user(dst, src, size);
18342+ return size;
18343 }
18344
18345 static __always_inline __must_check
18346-int __copy_from_user(void *dst, const void __user *src, unsigned size)
18347+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
18348 {
18349- int ret = 0;
18350+ size_t sz = __compiletime_object_size(dst);
18351+ unsigned ret = 0;
18352
18353 might_fault();
18354+
18355+ if (size > INT_MAX)
18356+ return size;
18357+
18358+ check_object_size(dst, size, false);
18359+
18360+#ifdef CONFIG_PAX_MEMORY_UDEREF
18361+ if (!__access_ok(VERIFY_READ, src, size))
18362+ return size;
18363+#endif
18364+
18365+ if (unlikely(sz != (size_t)-1 && sz < size)) {
18366+ copy_from_user_overflow();
18367+ return size;
18368+ }
18369+
18370 if (!__builtin_constant_p(size))
18371- return copy_user_generic(dst, (__force void *)src, size);
18372+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18373 switch (size) {
18374- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
18375+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
18376 ret, "b", "b", "=q", 1);
18377 return ret;
18378- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
18379+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
18380 ret, "w", "w", "=r", 2);
18381 return ret;
18382- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
18383+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
18384 ret, "l", "k", "=r", 4);
18385 return ret;
18386- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
18387+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18388 ret, "q", "", "=r", 8);
18389 return ret;
18390 case 10:
18391- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
18392+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18393 ret, "q", "", "=r", 10);
18394 if (unlikely(ret))
18395 return ret;
18396 __get_user_asm(*(u16 *)(8 + (char *)dst),
18397- (u16 __user *)(8 + (char __user *)src),
18398+ (const u16 __user *)(8 + (const char __user *)src),
18399 ret, "w", "w", "=r", 2);
18400 return ret;
18401 case 16:
18402- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
18403+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18404 ret, "q", "", "=r", 16);
18405 if (unlikely(ret))
18406 return ret;
18407 __get_user_asm(*(u64 *)(8 + (char *)dst),
18408- (u64 __user *)(8 + (char __user *)src),
18409+ (const u64 __user *)(8 + (const char __user *)src),
18410 ret, "q", "", "=r", 8);
18411 return ret;
18412 default:
18413- return copy_user_generic(dst, (__force void *)src, size);
18414+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18415 }
18416 }
18417
18418 static __always_inline __must_check
18419-int __copy_to_user(void __user *dst, const void *src, unsigned size)
18420+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
18421 {
18422- int ret = 0;
18423+ size_t sz = __compiletime_object_size(src);
18424+ unsigned ret = 0;
18425
18426 might_fault();
18427+
18428+ if (size > INT_MAX)
18429+ return size;
18430+
18431+ check_object_size(src, size, true);
18432+
18433+#ifdef CONFIG_PAX_MEMORY_UDEREF
18434+ if (!__access_ok(VERIFY_WRITE, dst, size))
18435+ return size;
18436+#endif
18437+
18438+ if (unlikely(sz != (size_t)-1 && sz < size)) {
18439+ copy_to_user_overflow();
18440+ return size;
18441+ }
18442+
18443 if (!__builtin_constant_p(size))
18444- return copy_user_generic((__force void *)dst, src, size);
18445+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18446 switch (size) {
18447- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
18448+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
18449 ret, "b", "b", "iq", 1);
18450 return ret;
18451- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
18452+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
18453 ret, "w", "w", "ir", 2);
18454 return ret;
18455- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
18456+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
18457 ret, "l", "k", "ir", 4);
18458 return ret;
18459- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
18460+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18461 ret, "q", "", "er", 8);
18462 return ret;
18463 case 10:
18464- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
18465+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18466 ret, "q", "", "er", 10);
18467 if (unlikely(ret))
18468 return ret;
18469 asm("":::"memory");
18470- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
18471+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
18472 ret, "w", "w", "ir", 2);
18473 return ret;
18474 case 16:
18475- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
18476+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18477 ret, "q", "", "er", 16);
18478 if (unlikely(ret))
18479 return ret;
18480 asm("":::"memory");
18481- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
18482+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
18483 ret, "q", "", "er", 8);
18484 return ret;
18485 default:
18486- return copy_user_generic((__force void *)dst, src, size);
18487+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18488 }
18489 }
18490
18491 static __always_inline __must_check
18492-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18493+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
18494 {
18495- int ret = 0;
18496+ unsigned ret = 0;
18497
18498 might_fault();
18499+
18500+ if (size > INT_MAX)
18501+ return size;
18502+
18503+#ifdef CONFIG_PAX_MEMORY_UDEREF
18504+ if (!__access_ok(VERIFY_READ, src, size))
18505+ return size;
18506+ if (!__access_ok(VERIFY_WRITE, dst, size))
18507+ return size;
18508+#endif
18509+
18510 if (!__builtin_constant_p(size))
18511- return copy_user_generic((__force void *)dst,
18512- (__force void *)src, size);
18513+ return copy_user_generic((__force_kernel void *)____m(dst),
18514+ (__force_kernel const void *)____m(src), size);
18515 switch (size) {
18516 case 1: {
18517 u8 tmp;
18518- __get_user_asm(tmp, (u8 __user *)src,
18519+ __get_user_asm(tmp, (const u8 __user *)src,
18520 ret, "b", "b", "=q", 1);
18521 if (likely(!ret))
18522 __put_user_asm(tmp, (u8 __user *)dst,
18523@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18524 }
18525 case 2: {
18526 u16 tmp;
18527- __get_user_asm(tmp, (u16 __user *)src,
18528+ __get_user_asm(tmp, (const u16 __user *)src,
18529 ret, "w", "w", "=r", 2);
18530 if (likely(!ret))
18531 __put_user_asm(tmp, (u16 __user *)dst,
18532@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18533
18534 case 4: {
18535 u32 tmp;
18536- __get_user_asm(tmp, (u32 __user *)src,
18537+ __get_user_asm(tmp, (const u32 __user *)src,
18538 ret, "l", "k", "=r", 4);
18539 if (likely(!ret))
18540 __put_user_asm(tmp, (u32 __user *)dst,
18541@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18542 }
18543 case 8: {
18544 u64 tmp;
18545- __get_user_asm(tmp, (u64 __user *)src,
18546+ __get_user_asm(tmp, (const u64 __user *)src,
18547 ret, "q", "", "=r", 8);
18548 if (likely(!ret))
18549 __put_user_asm(tmp, (u64 __user *)dst,
18550@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18551 return ret;
18552 }
18553 default:
18554- return copy_user_generic((__force void *)dst,
18555- (__force void *)src, size);
18556+ return copy_user_generic((__force_kernel void *)____m(dst),
18557+ (__force_kernel const void *)____m(src), size);
18558 }
18559 }
18560
e2b79cd1 18561-static __must_check __always_inline int
bb5f0bf8 18562-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
e2b79cd1 18563+static __must_check __always_inline unsigned long
bb5f0bf8
AF
18564+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
18565 {
18566- return copy_user_generic(dst, (__force const void *)src, size);
18567+ if (size > INT_MAX)
18568+ return size;
18569+
18570+#ifdef CONFIG_PAX_MEMORY_UDEREF
18571+ if (!__access_ok(VERIFY_READ, src, size))
18572+ return size;
18573+#endif
18574+
18575+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18576 }
18577
18578-static __must_check __always_inline int
18579-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
18580+static __must_check __always_inline unsigned long
18581+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
18582 {
18583- return copy_user_generic((__force void *)dst, src, size);
18584+ if (size > INT_MAX)
18585+ return size;
18586+
18587+#ifdef CONFIG_PAX_MEMORY_UDEREF
18588+ if (!__access_ok(VERIFY_WRITE, dst, size))
18589+ return size;
18590+#endif
18591+
18592+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18593 }
18594
18595-extern long __copy_user_nocache(void *dst, const void __user *src,
18596- unsigned size, int zerorest);
18597+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
18598+ unsigned long size, int zerorest) __size_overflow(3);
18599
18600-static inline int
18601-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
18602+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
18603 {
18604 might_sleep();
18605+
18606+ if (size > INT_MAX)
18607+ return size;
18608+
18609+#ifdef CONFIG_PAX_MEMORY_UDEREF
18610+ if (!__access_ok(VERIFY_READ, src, size))
18611+ return size;
18612+#endif
18613+
18614 return __copy_user_nocache(dst, src, size, 1);
18615 }
18616
18617-static inline int
18618-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
18619- unsigned size)
18620+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
18621+ unsigned long size)
18622 {
18623+ if (size > INT_MAX)
18624+ return size;
18625+
18626+#ifdef CONFIG_PAX_MEMORY_UDEREF
18627+ if (!__access_ok(VERIFY_READ, src, size))
18628+ return size;
18629+#endif
18630+
18631 return __copy_user_nocache(dst, src, size, 0);
18632 }
18633
18634-unsigned long
18635-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
18636+extern unsigned long
18637+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
18638
18639 #endif /* _ASM_X86_UACCESS_64_H */
18640diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
18641index 5b238981..77fdd78 100644
18642--- a/arch/x86/include/asm/word-at-a-time.h
18643+++ b/arch/x86/include/asm/word-at-a-time.h
18644@@ -11,7 +11,7 @@
18645 * and shift, for example.
18646 */
18647 struct word_at_a_time {
18648- const unsigned long one_bits, high_bits;
18649+ unsigned long one_bits, high_bits;
18650 };
18651
18652 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
18653diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
18654index d8d9922..bf6cecb 100644
18655--- a/arch/x86/include/asm/x86_init.h
18656+++ b/arch/x86/include/asm/x86_init.h
18657@@ -129,7 +129,7 @@ struct x86_init_ops {
18658 struct x86_init_timers timers;
18659 struct x86_init_iommu iommu;
18660 struct x86_init_pci pci;
18661-};
18662+} __no_const;
18663
18664 /**
18665 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
18666@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
18667 void (*setup_percpu_clockev)(void);
18668 void (*early_percpu_clock_init)(void);
18669 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
18670-};
18671+} __no_const;
18672
18673 /**
18674 * struct x86_platform_ops - platform specific runtime functions
18675@@ -166,7 +166,7 @@ struct x86_platform_ops {
18676 void (*save_sched_clock_state)(void);
18677 void (*restore_sched_clock_state)(void);
18678 void (*apic_post_init)(void);
18679-};
18680+} __no_const;
18681
18682 struct pci_dev;
18683 struct msi_msg;
18684@@ -180,7 +180,7 @@ struct x86_msi_ops {
18685 void (*teardown_msi_irqs)(struct pci_dev *dev);
18686 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
18687 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
18688-};
18689+} __no_const;
18690
18691 struct IO_APIC_route_entry;
18692 struct io_apic_irq_attr;
18693@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
18694 unsigned int destination, int vector,
18695 struct io_apic_irq_attr *attr);
18696 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
18697-};
18698+} __no_const;
18699
18700 extern struct x86_init_ops x86_init;
18701 extern struct x86_cpuinit_ops x86_cpuinit;
18702diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
18703index 0415cda..3b22adc 100644
18704--- a/arch/x86/include/asm/xsave.h
18705+++ b/arch/x86/include/asm/xsave.h
18706@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
18707 if (unlikely(err))
18708 return -EFAULT;
18709
18710+ pax_open_userland();
18711 __asm__ __volatile__(ASM_STAC "\n"
18712- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
18713+ "1:"
18714+ __copyuser_seg
18715+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
18716 "2: " ASM_CLAC "\n"
18717 ".section .fixup,\"ax\"\n"
18718 "3: movl $-1,%[err]\n"
18719@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
18720 : [err] "=r" (err)
18721 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
18722 : "memory");
18723+ pax_close_userland();
18724 return err;
18725 }
18726
18727 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
18728 {
18729 int err;
18730- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
18731+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
18732 u32 lmask = mask;
18733 u32 hmask = mask >> 32;
18734
18735+ pax_open_userland();
18736 __asm__ __volatile__(ASM_STAC "\n"
18737- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
18738+ "1:"
18739+ __copyuser_seg
18740+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
18741 "2: " ASM_CLAC "\n"
18742 ".section .fixup,\"ax\"\n"
18743 "3: movl $-1,%[err]\n"
18744@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
18745 : [err] "=r" (err)
18746 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
18747 : "memory"); /* memory required? */
18748+ pax_close_userland();
18749 return err;
18750 }
18751
18752diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
18753index bbae024..e1528f9 100644
18754--- a/arch/x86/include/uapi/asm/e820.h
18755+++ b/arch/x86/include/uapi/asm/e820.h
18756@@ -63,7 +63,7 @@ struct e820map {
18757 #define ISA_START_ADDRESS 0xa0000
18758 #define ISA_END_ADDRESS 0x100000
18759
18760-#define BIOS_BEGIN 0x000a0000
18761+#define BIOS_BEGIN 0x000c0000
18762 #define BIOS_END 0x00100000
18763
18764 #define BIOS_ROM_BASE 0xffe00000
18765diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
18766index 7bd3bd3..5dac791 100644
18767--- a/arch/x86/kernel/Makefile
18768+++ b/arch/x86/kernel/Makefile
18769@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
18770 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
18771 obj-$(CONFIG_IRQ_WORK) += irq_work.o
18772 obj-y += probe_roms.o
18773-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
18774+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
18775 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
18776 obj-y += syscall_$(BITS).o
18777 obj-$(CONFIG_X86_64) += vsyscall_64.o
18778diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
18779index 230c8ea..f915130 100644
18780--- a/arch/x86/kernel/acpi/boot.c
18781+++ b/arch/x86/kernel/acpi/boot.c
18782@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
18783 * If your system is blacklisted here, but you find that acpi=force
18784 * works for you, please contact linux-acpi@vger.kernel.org
18785 */
18786-static struct dmi_system_id __initdata acpi_dmi_table[] = {
18787+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
18788 /*
18789 * Boxes that need ACPI disabled
18790 */
18791@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
18792 };
18793
18794 /* second table for DMI checks that should run after early-quirks */
18795-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
18796+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
18797 /*
18798 * HP laptops which use a DSDT reporting as HP/SB400/10000,
18799 * which includes some code which overrides all temperature
18800diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
18801index ec94e11..7fbbec0 100644
18802--- a/arch/x86/kernel/acpi/sleep.c
18803+++ b/arch/x86/kernel/acpi/sleep.c
18804@@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void)
18805 #else /* CONFIG_64BIT */
18806 #ifdef CONFIG_SMP
18807 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
18808+
18809+ pax_open_kernel();
18810 early_gdt_descr.address =
18811 (unsigned long)get_cpu_gdt_table(smp_processor_id());
18812+ pax_close_kernel();
18813+
18814 initial_gs = per_cpu_offset(smp_processor_id());
18815 #endif
18816 initial_code = (unsigned long)wakeup_long64;
18817diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
18818index d1daa66..59fecba 100644
18819--- a/arch/x86/kernel/acpi/wakeup_32.S
18820+++ b/arch/x86/kernel/acpi/wakeup_32.S
18821@@ -29,13 +29,11 @@ wakeup_pmode_return:
18822 # and restore the stack ... but you need gdt for this to work
18823 movl saved_context_esp, %esp
18824
18825- movl %cs:saved_magic, %eax
18826- cmpl $0x12345678, %eax
18827+ cmpl $0x12345678, saved_magic
18828 jne bogus_magic
18829
18830 # jump to place where we left off
18831- movl saved_eip, %eax
18832- jmp *%eax
18833+ jmp *(saved_eip)
18834
18835 bogus_magic:
18836 jmp bogus_magic
18837diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
18838index c15cf9a..0e63558 100644
18839--- a/arch/x86/kernel/alternative.c
18840+++ b/arch/x86/kernel/alternative.c
18841@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
18842 */
18843 for (a = start; a < end; a++) {
18844 instr = (u8 *)&a->instr_offset + a->instr_offset;
18845+
18846+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18847+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18848+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
18849+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18850+#endif
18851+
18852 replacement = (u8 *)&a->repl_offset + a->repl_offset;
18853 BUG_ON(a->replacementlen > a->instrlen);
18854 BUG_ON(a->instrlen > sizeof(insnbuf));
18855@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
18856 for (poff = start; poff < end; poff++) {
18857 u8 *ptr = (u8 *)poff + *poff;
18858
18859+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18860+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18861+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18862+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18863+#endif
18864+
18865 if (!*poff || ptr < text || ptr >= text_end)
18866 continue;
18867 /* turn DS segment override prefix into lock prefix */
18868- if (*ptr == 0x3e)
18869+ if (*ktla_ktva(ptr) == 0x3e)
18870 text_poke(ptr, ((unsigned char []){0xf0}), 1);
18871 }
18872 mutex_unlock(&text_mutex);
18873@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
18874 for (poff = start; poff < end; poff++) {
18875 u8 *ptr = (u8 *)poff + *poff;
18876
18877+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18878+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18879+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18880+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18881+#endif
18882+
18883 if (!*poff || ptr < text || ptr >= text_end)
18884 continue;
18885 /* turn lock prefix into DS segment override prefix */
18886- if (*ptr == 0xf0)
18887+ if (*ktla_ktva(ptr) == 0xf0)
18888 text_poke(ptr, ((unsigned char []){0x3E}), 1);
18889 }
18890 mutex_unlock(&text_mutex);
18891@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
18892
18893 BUG_ON(p->len > MAX_PATCH_LEN);
18894 /* prep the buffer with the original instructions */
18895- memcpy(insnbuf, p->instr, p->len);
18896+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
18897 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
18898 (unsigned long)p->instr, p->len);
18899
18900@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
18901 if (!uniproc_patched || num_possible_cpus() == 1)
18902 free_init_pages("SMP alternatives",
18903 (unsigned long)__smp_locks,
18904- (unsigned long)__smp_locks_end);
18905+ PAGE_ALIGN((unsigned long)__smp_locks_end));
18906 #endif
18907
18908 apply_paravirt(__parainstructions, __parainstructions_end);
18909@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
18910 * instructions. And on the local CPU you need to be protected again NMI or MCE
18911 * handlers seeing an inconsistent instruction while you patch.
18912 */
18913-void *__init_or_module text_poke_early(void *addr, const void *opcode,
18914+void *__kprobes text_poke_early(void *addr, const void *opcode,
18915 size_t len)
18916 {
18917 unsigned long flags;
18918 local_irq_save(flags);
18919- memcpy(addr, opcode, len);
18920+
18921+ pax_open_kernel();
18922+ memcpy(ktla_ktva(addr), opcode, len);
18923 sync_core();
18924+ pax_close_kernel();
18925+
18926 local_irq_restore(flags);
18927 /* Could also do a CLFLUSH here to speed up CPU recovery; but
18928 that causes hangs on some VIA CPUs. */
18929@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
18930 */
18931 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
18932 {
18933- unsigned long flags;
18934- char *vaddr;
18935+ unsigned char *vaddr = ktla_ktva(addr);
18936 struct page *pages[2];
18937- int i;
18938+ size_t i;
18939
18940 if (!core_kernel_text((unsigned long)addr)) {
18941- pages[0] = vmalloc_to_page(addr);
18942- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
18943+ pages[0] = vmalloc_to_page(vaddr);
18944+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
18945 } else {
18946- pages[0] = virt_to_page(addr);
18947+ pages[0] = virt_to_page(vaddr);
18948 WARN_ON(!PageReserved(pages[0]));
18949- pages[1] = virt_to_page(addr + PAGE_SIZE);
18950+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
18951 }
18952 BUG_ON(!pages[0]);
18953- local_irq_save(flags);
18954- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
18955- if (pages[1])
18956- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
18957- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
18958- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
18959- clear_fixmap(FIX_TEXT_POKE0);
18960- if (pages[1])
18961- clear_fixmap(FIX_TEXT_POKE1);
18962- local_flush_tlb();
18963- sync_core();
18964- /* Could also do a CLFLUSH here to speed up CPU recovery; but
18965- that causes hangs on some VIA CPUs. */
18966+ text_poke_early(addr, opcode, len);
18967 for (i = 0; i < len; i++)
18968- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
18969- local_irq_restore(flags);
18970+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
18971 return addr;
18972 }
18973
18974diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
18975index 904611b..004dde6 100644
18976--- a/arch/x86/kernel/apic/apic.c
18977+++ b/arch/x86/kernel/apic/apic.c
18978@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
18979 /*
18980 * Debug level, exported for io_apic.c
18981 */
18982-unsigned int apic_verbosity;
18983+int apic_verbosity;
18984
18985 int pic_mode;
18986
18987@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
18988 apic_write(APIC_ESR, 0);
18989 v1 = apic_read(APIC_ESR);
18990 ack_APIC_irq();
18991- atomic_inc(&irq_err_count);
18992+ atomic_inc_unchecked(&irq_err_count);
18993
18994 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
18995 smp_processor_id(), v0 , v1);
18996diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
18997index 00c77cf..2dc6a2d 100644
18998--- a/arch/x86/kernel/apic/apic_flat_64.c
18999+++ b/arch/x86/kernel/apic/apic_flat_64.c
19000@@ -157,7 +157,7 @@ static int flat_probe(void)
19001 return 1;
19002 }
19003
19004-static struct apic apic_flat = {
19005+static struct apic apic_flat __read_only = {
19006 .name = "flat",
19007 .probe = flat_probe,
19008 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
19009@@ -271,7 +271,7 @@ static int physflat_probe(void)
19010 return 0;
19011 }
19012
19013-static struct apic apic_physflat = {
19014+static struct apic apic_physflat __read_only = {
19015
19016 .name = "physical flat",
19017 .probe = physflat_probe,
19018diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
19019index e145f28..2752888 100644
19020--- a/arch/x86/kernel/apic/apic_noop.c
19021+++ b/arch/x86/kernel/apic/apic_noop.c
19022@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
19023 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
19024 }
19025
19026-struct apic apic_noop = {
19027+struct apic apic_noop __read_only = {
19028 .name = "noop",
19029 .probe = noop_probe,
19030 .acpi_madt_oem_check = NULL,
19031diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
19032index d50e364..543bee3 100644
19033--- a/arch/x86/kernel/apic/bigsmp_32.c
19034+++ b/arch/x86/kernel/apic/bigsmp_32.c
19035@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
19036 return dmi_bigsmp;
19037 }
19038
19039-static struct apic apic_bigsmp = {
19040+static struct apic apic_bigsmp __read_only = {
19041
19042 .name = "bigsmp",
19043 .probe = probe_bigsmp,
19044diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
19045index 0874799..a7a7892 100644
19046--- a/arch/x86/kernel/apic/es7000_32.c
19047+++ b/arch/x86/kernel/apic/es7000_32.c
19048@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
19049 return ret && es7000_apic_is_cluster();
19050 }
19051
19052-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
19053-static struct apic __refdata apic_es7000_cluster = {
19054+static struct apic apic_es7000_cluster __read_only = {
19055
19056 .name = "es7000",
19057 .probe = probe_es7000,
19058@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
19059 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
19060 };
19061
19062-static struct apic __refdata apic_es7000 = {
19063+static struct apic apic_es7000 __read_only = {
19064
19065 .name = "es7000",
19066 .probe = probe_es7000,
19067diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
19068index 9ed796c..e930fe4 100644
19069--- a/arch/x86/kernel/apic/io_apic.c
19070+++ b/arch/x86/kernel/apic/io_apic.c
19071@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
19072 }
19073 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
19074
19075-void lock_vector_lock(void)
19076+void lock_vector_lock(void) __acquires(vector_lock)
19077 {
19078 /* Used to the online set of cpus does not change
19079 * during assign_irq_vector.
19080@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
19081 raw_spin_lock(&vector_lock);
19082 }
19083
19084-void unlock_vector_lock(void)
19085+void unlock_vector_lock(void) __releases(vector_lock)
19086 {
19087 raw_spin_unlock(&vector_lock);
19088 }
19089@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
19090 ack_APIC_irq();
19091 }
19092
19093-atomic_t irq_mis_count;
19094+atomic_unchecked_t irq_mis_count;
19095
19096 #ifdef CONFIG_GENERIC_PENDING_IRQ
19097 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
19098@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
19099 * at the cpu.
19100 */
19101 if (!(v & (1 << (i & 0x1f)))) {
19102- atomic_inc(&irq_mis_count);
19103+ atomic_inc_unchecked(&irq_mis_count);
19104
19105 eoi_ioapic_irq(irq, cfg);
19106 }
19107diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
19108index d661ee9..791fd33 100644
19109--- a/arch/x86/kernel/apic/numaq_32.c
19110+++ b/arch/x86/kernel/apic/numaq_32.c
19111@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
19112 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
19113 }
19114
19115-/* Use __refdata to keep false positive warning calm. */
19116-static struct apic __refdata apic_numaq = {
19117+static struct apic apic_numaq __read_only = {
19118
19119 .name = "NUMAQ",
19120 .probe = probe_numaq,
19121diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
19122index eb35ef9..f184a21 100644
19123--- a/arch/x86/kernel/apic/probe_32.c
19124+++ b/arch/x86/kernel/apic/probe_32.c
19125@@ -72,7 +72,7 @@ static int probe_default(void)
19126 return 1;
19127 }
19128
19129-static struct apic apic_default = {
19130+static struct apic apic_default __read_only = {
19131
19132 .name = "default",
19133 .probe = probe_default,
19134diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
19135index 77c95c0..434f8a4 100644
19136--- a/arch/x86/kernel/apic/summit_32.c
19137+++ b/arch/x86/kernel/apic/summit_32.c
19138@@ -486,7 +486,7 @@ void setup_summit(void)
19139 }
19140 #endif
19141
19142-static struct apic apic_summit = {
19143+static struct apic apic_summit __read_only = {
19144
19145 .name = "summit",
19146 .probe = probe_summit,
19147diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
19148index c88baa4..757aee1 100644
19149--- a/arch/x86/kernel/apic/x2apic_cluster.c
19150+++ b/arch/x86/kernel/apic/x2apic_cluster.c
19151@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
19152 return notifier_from_errno(err);
19153 }
19154
19155-static struct notifier_block __refdata x2apic_cpu_notifier = {
19156+static struct notifier_block x2apic_cpu_notifier = {
19157 .notifier_call = update_clusterinfo,
19158 };
19159
19160@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
19161 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
19162 }
19163
19164-static struct apic apic_x2apic_cluster = {
19165+static struct apic apic_x2apic_cluster __read_only = {
19166
19167 .name = "cluster x2apic",
19168 .probe = x2apic_cluster_probe,
19169diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
19170index 562a76d..a003c0f 100644
19171--- a/arch/x86/kernel/apic/x2apic_phys.c
19172+++ b/arch/x86/kernel/apic/x2apic_phys.c
19173@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
19174 return apic == &apic_x2apic_phys;
19175 }
19176
19177-static struct apic apic_x2apic_phys = {
19178+static struct apic apic_x2apic_phys __read_only = {
19179
19180 .name = "physical x2apic",
19181 .probe = x2apic_phys_probe,
19182diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
19183index 794f6eb..67e1db2 100644
19184--- a/arch/x86/kernel/apic/x2apic_uv_x.c
19185+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
19186@@ -342,7 +342,7 @@ static int uv_probe(void)
19187 return apic == &apic_x2apic_uv_x;
19188 }
19189
19190-static struct apic __refdata apic_x2apic_uv_x = {
19191+static struct apic apic_x2apic_uv_x __read_only = {
19192
19193 .name = "UV large system",
19194 .probe = uv_probe,
19195diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
19196index 53a4e27..038760a 100644
19197--- a/arch/x86/kernel/apm_32.c
19198+++ b/arch/x86/kernel/apm_32.c
19199@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
19200 * This is for buggy BIOS's that refer to (real mode) segment 0x40
19201 * even though they are called in protected mode.
19202 */
19203-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
19204+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
19205 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
19206
19207 static const char driver_version[] = "1.16ac"; /* no spaces */
19208@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
19209 BUG_ON(cpu != 0);
19210 gdt = get_cpu_gdt_table(cpu);
19211 save_desc_40 = gdt[0x40 / 8];
19212+
19213+ pax_open_kernel();
19214 gdt[0x40 / 8] = bad_bios_desc;
19215+ pax_close_kernel();
19216
19217 apm_irq_save(flags);
19218 APM_DO_SAVE_SEGS;
19219@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
19220 &call->esi);
19221 APM_DO_RESTORE_SEGS;
19222 apm_irq_restore(flags);
19223+
19224+ pax_open_kernel();
19225 gdt[0x40 / 8] = save_desc_40;
19226+ pax_close_kernel();
19227+
19228 put_cpu();
19229
19230 return call->eax & 0xff;
19231@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
19232 BUG_ON(cpu != 0);
19233 gdt = get_cpu_gdt_table(cpu);
19234 save_desc_40 = gdt[0x40 / 8];
19235+
19236+ pax_open_kernel();
19237 gdt[0x40 / 8] = bad_bios_desc;
19238+ pax_close_kernel();
19239
19240 apm_irq_save(flags);
19241 APM_DO_SAVE_SEGS;
19242@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
19243 &call->eax);
19244 APM_DO_RESTORE_SEGS;
19245 apm_irq_restore(flags);
19246+
19247+ pax_open_kernel();
19248 gdt[0x40 / 8] = save_desc_40;
19249+ pax_close_kernel();
19250+
19251 put_cpu();
19252 return error;
19253 }
19254@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
19255 * code to that CPU.
19256 */
19257 gdt = get_cpu_gdt_table(0);
19258+
19259+ pax_open_kernel();
19260 set_desc_base(&gdt[APM_CS >> 3],
19261 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
19262 set_desc_base(&gdt[APM_CS_16 >> 3],
19263 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
19264 set_desc_base(&gdt[APM_DS >> 3],
19265 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
19266+ pax_close_kernel();
19267
19268 proc_create("apm", 0, NULL, &apm_file_ops);
19269
19270diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
19271index 2861082..6d4718e 100644
19272--- a/arch/x86/kernel/asm-offsets.c
19273+++ b/arch/x86/kernel/asm-offsets.c
19274@@ -33,6 +33,8 @@ void common(void) {
19275 OFFSET(TI_status, thread_info, status);
19276 OFFSET(TI_addr_limit, thread_info, addr_limit);
19277 OFFSET(TI_preempt_count, thread_info, preempt_count);
19278+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
19279+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
19280
19281 BLANK();
19282 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
19283@@ -53,8 +55,26 @@ void common(void) {
19284 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
19285 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
19286 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
19287+
19288+#ifdef CONFIG_PAX_KERNEXEC
19289+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
19290 #endif
19291
19292+#ifdef CONFIG_PAX_MEMORY_UDEREF
19293+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
19294+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
19295+#ifdef CONFIG_X86_64
19296+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
19297+#endif
19298+#endif
19299+
19300+#endif
19301+
19302+ BLANK();
19303+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
19304+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
19305+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
19306+
19307 #ifdef CONFIG_XEN
19308 BLANK();
19309 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
19310diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
19311index e7c798b..2b2019b 100644
19312--- a/arch/x86/kernel/asm-offsets_64.c
19313+++ b/arch/x86/kernel/asm-offsets_64.c
19314@@ -77,6 +77,7 @@ int main(void)
19315 BLANK();
19316 #undef ENTRY
19317
19318+ DEFINE(TSS_size, sizeof(struct tss_struct));
19319 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
19320 BLANK();
19321
19322diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
19323index b0684e4..22ccfd7 100644
19324--- a/arch/x86/kernel/cpu/Makefile
19325+++ b/arch/x86/kernel/cpu/Makefile
19326@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
19327 CFLAGS_REMOVE_perf_event.o = -pg
19328 endif
19329
19330-# Make sure load_percpu_segment has no stackprotector
19331-nostackp := $(call cc-option, -fno-stack-protector)
19332-CFLAGS_common.o := $(nostackp)
19333-
19334 obj-y := intel_cacheinfo.o scattered.o topology.o
19335 obj-y += proc.o capflags.o powerflags.o common.o
19336 obj-y += rdrand.o
19337diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
19338index 5013a48..0782c53 100644
19339--- a/arch/x86/kernel/cpu/amd.c
19340+++ b/arch/x86/kernel/cpu/amd.c
19341@@ -744,7 +744,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
19342 unsigned int size)
19343 {
19344 /* AMD errata T13 (order #21922) */
19345- if ((c->x86 == 6)) {
19346+ if (c->x86 == 6) {
19347 /* Duron Rev A0 */
19348 if (c->x86_model == 3 && c->x86_mask == 0)
19349 size = 64;
19350diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
19351index 22018f7..df77e23 100644
19352--- a/arch/x86/kernel/cpu/common.c
19353+++ b/arch/x86/kernel/cpu/common.c
19354@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
19355
19356 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
19357
19358-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
19359-#ifdef CONFIG_X86_64
19360- /*
19361- * We need valid kernel segments for data and code in long mode too
19362- * IRET will check the segment types kkeil 2000/10/28
19363- * Also sysret mandates a special GDT layout
19364- *
19365- * TLS descriptors are currently at a different place compared to i386.
19366- * Hopefully nobody expects them at a fixed place (Wine?)
19367- */
19368- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
19369- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
19370- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
19371- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
19372- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
19373- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
19374-#else
19375- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
19376- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19377- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
19378- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
19379- /*
19380- * Segments used for calling PnP BIOS have byte granularity.
19381- * They code segments and data segments have fixed 64k limits,
19382- * the transfer segment sizes are set at run time.
19383- */
19384- /* 32-bit code */
19385- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
19386- /* 16-bit code */
19387- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
19388- /* 16-bit data */
19389- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
19390- /* 16-bit data */
19391- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
19392- /* 16-bit data */
19393- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
19394- /*
19395- * The APM segments have byte granularity and their bases
19396- * are set at run time. All have 64k limits.
19397- */
19398- /* 32-bit code */
19399- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
19400- /* 16-bit code */
19401- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
19402- /* data */
19403- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
19404-
19405- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19406- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19407- GDT_STACK_CANARY_INIT
19408-#endif
19409-} };
19410-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
19411-
19412 static int __init x86_xsave_setup(char *s)
19413 {
19414 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
19415@@ -288,6 +234,57 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
19416 set_in_cr4(X86_CR4_SMAP);
19417 }
19418
19419+#ifdef CONFIG_X86_64
19420+static __init int setup_disable_pcid(char *arg)
19421+{
19422+ setup_clear_cpu_cap(X86_FEATURE_PCID);
19423+
19424+#ifdef CONFIG_PAX_MEMORY_UDEREF
19425+ if (clone_pgd_mask != ~(pgdval_t)0UL)
19426+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
19427+#endif
19428+
19429+ return 1;
19430+}
19431+__setup("nopcid", setup_disable_pcid);
19432+
19433+static void setup_pcid(struct cpuinfo_x86 *c)
19434+{
19435+ if (!cpu_has(c, X86_FEATURE_PCID)) {
19436+
19437+#ifdef CONFIG_PAX_MEMORY_UDEREF
19438+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
19439+ pax_open_kernel();
19440+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
19441+ pax_close_kernel();
19442+ printk("PAX: slow and weak UDEREF enabled\n");
19443+ } else
19444+ printk("PAX: UDEREF disabled\n");
19445+#endif
19446+
19447+ return;
19448+ }
19449+
19450+ printk("PAX: PCID detected\n");
19451+ set_in_cr4(X86_CR4_PCIDE);
19452+
19453+#ifdef CONFIG_PAX_MEMORY_UDEREF
19454+ pax_open_kernel();
19455+ clone_pgd_mask = ~(pgdval_t)0UL;
19456+ pax_close_kernel();
19457+ if (pax_user_shadow_base)
19458+ printk("PAX: weak UDEREF enabled\n");
19459+ else {
19460+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
19461+ printk("PAX: strong UDEREF enabled\n");
19462+ }
19463+#endif
19464+
19465+ if (cpu_has(c, X86_FEATURE_INVPCID))
19466+ printk("PAX: INVPCID detected\n");
19467+}
19468+#endif
19469+
19470 /*
19471 * Some CPU features depend on higher CPUID levels, which may not always
19472 * be available due to CPUID level capping or broken virtualization
19473@@ -386,7 +383,7 @@ void switch_to_new_gdt(int cpu)
19474 {
19475 struct desc_ptr gdt_descr;
19476
19477- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
19478+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19479 gdt_descr.size = GDT_SIZE - 1;
19480 load_gdt(&gdt_descr);
19481 /* Reload the per-cpu base */
19482@@ -874,6 +871,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
19483 setup_smep(c);
19484 setup_smap(c);
19485
19486+#ifdef CONFIG_X86_64
19487+ setup_pcid(c);
19488+#endif
19489+
19490 /*
19491 * The vendor-specific functions might have changed features.
19492 * Now we do "generic changes."
19493@@ -882,6 +883,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
19494 /* Filter out anything that depends on CPUID levels we don't have */
19495 filter_cpuid_features(c, true);
19496
19497+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
19498+ setup_clear_cpu_cap(X86_FEATURE_SEP);
19499+#endif
19500+
19501 /* If the model name is still unset, do table lookup. */
19502 if (!c->x86_model_id[0]) {
19503 const char *p;
19504@@ -1069,10 +1074,12 @@ static __init int setup_disablecpuid(char *arg)
19505 }
19506 __setup("clearcpuid=", setup_disablecpuid);
19507
19508+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
19509+EXPORT_PER_CPU_SYMBOL(current_tinfo);
19510+
19511 #ifdef CONFIG_X86_64
19512 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
19513-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
19514- (unsigned long) nmi_idt_table };
19515+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
19516
19517 DEFINE_PER_CPU_FIRST(union irq_stack_union,
19518 irq_stack_union) __aligned(PAGE_SIZE);
19519@@ -1086,7 +1093,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
19520 EXPORT_PER_CPU_SYMBOL(current_task);
19521
19522 DEFINE_PER_CPU(unsigned long, kernel_stack) =
19523- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
19524+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
19525 EXPORT_PER_CPU_SYMBOL(kernel_stack);
19526
19527 DEFINE_PER_CPU(char *, irq_stack_ptr) =
19528@@ -1231,7 +1238,7 @@ void __cpuinit cpu_init(void)
19529 load_ucode_ap();
19530
19531 cpu = stack_smp_processor_id();
19532- t = &per_cpu(init_tss, cpu);
19533+ t = init_tss + cpu;
19534 oist = &per_cpu(orig_ist, cpu);
19535
19536 #ifdef CONFIG_NUMA
19537@@ -1257,7 +1264,7 @@ void __cpuinit cpu_init(void)
19538 switch_to_new_gdt(cpu);
19539 loadsegment(fs, 0);
19540
19541- load_idt((const struct desc_ptr *)&idt_descr);
19542+ load_idt(&idt_descr);
19543
19544 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
19545 syscall_init();
19546@@ -1266,7 +1273,6 @@ void __cpuinit cpu_init(void)
19547 wrmsrl(MSR_KERNEL_GS_BASE, 0);
19548 barrier();
19549
19550- x86_configure_nx();
19551 enable_x2apic();
19552
19553 /*
19554@@ -1318,7 +1324,7 @@ void __cpuinit cpu_init(void)
19555 {
19556 int cpu = smp_processor_id();
19557 struct task_struct *curr = current;
19558- struct tss_struct *t = &per_cpu(init_tss, cpu);
19559+ struct tss_struct *t = init_tss + cpu;
19560 struct thread_struct *thread = &curr->thread;
19561
19562 show_ucode_info_early();
19563diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
19564index 7c6f7d5..8cac382 100644
19565--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
19566+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
19567@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
19568 };
19569
19570 #ifdef CONFIG_AMD_NB
19571+static struct attribute *default_attrs_amd_nb[] = {
19572+ &type.attr,
19573+ &level.attr,
19574+ &coherency_line_size.attr,
19575+ &physical_line_partition.attr,
19576+ &ways_of_associativity.attr,
19577+ &number_of_sets.attr,
19578+ &size.attr,
19579+ &shared_cpu_map.attr,
19580+ &shared_cpu_list.attr,
19581+ NULL,
19582+ NULL,
19583+ NULL,
19584+ NULL
19585+};
19586+
19587 static struct attribute ** __cpuinit amd_l3_attrs(void)
19588 {
19589 static struct attribute **attrs;
19590@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
19591
19592 n = ARRAY_SIZE(default_attrs);
19593
19594- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
19595- n += 2;
19596-
19597- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
19598- n += 1;
19599-
19600- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
19601- if (attrs == NULL)
19602- return attrs = default_attrs;
19603-
19604- for (n = 0; default_attrs[n]; n++)
19605- attrs[n] = default_attrs[n];
19606+ attrs = default_attrs_amd_nb;
19607
19608 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
19609 attrs[n++] = &cache_disable_0.attr;
19610@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
19611 .default_attrs = default_attrs,
19612 };
19613
19614+#ifdef CONFIG_AMD_NB
19615+static struct kobj_type ktype_cache_amd_nb = {
19616+ .sysfs_ops = &sysfs_ops,
19617+ .default_attrs = default_attrs_amd_nb,
19618+};
19619+#endif
19620+
19621 static struct kobj_type ktype_percpu_entry = {
19622 .sysfs_ops = &sysfs_ops,
19623 };
19624@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
19625 return retval;
19626 }
19627
19628+#ifdef CONFIG_AMD_NB
19629+ amd_l3_attrs();
19630+#endif
19631+
19632 for (i = 0; i < num_cache_leaves; i++) {
19633+ struct kobj_type *ktype;
19634+
19635 this_object = INDEX_KOBJECT_PTR(cpu, i);
19636 this_object->cpu = cpu;
19637 this_object->index = i;
19638
19639 this_leaf = CPUID4_INFO_IDX(cpu, i);
19640
19641- ktype_cache.default_attrs = default_attrs;
19642+ ktype = &ktype_cache;
19643 #ifdef CONFIG_AMD_NB
19644 if (this_leaf->base.nb)
19645- ktype_cache.default_attrs = amd_l3_attrs();
19646+ ktype = &ktype_cache_amd_nb;
19647 #endif
19648 retval = kobject_init_and_add(&(this_object->kobj),
19649- &ktype_cache,
19650+ ktype,
19651 per_cpu(ici_cache_kobject, cpu),
19652 "index%1lu", i);
19653 if (unlikely(retval)) {
19654@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
19655 return NOTIFY_OK;
19656 }
19657
19658-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
19659+static struct notifier_block cacheinfo_cpu_notifier = {
19660 .notifier_call = cacheinfo_cpu_callback,
19661 };
19662
19663diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
19664index 9239504..b2471ce 100644
19665--- a/arch/x86/kernel/cpu/mcheck/mce.c
19666+++ b/arch/x86/kernel/cpu/mcheck/mce.c
19667@@ -45,6 +45,7 @@
19668 #include <asm/processor.h>
19669 #include <asm/mce.h>
19670 #include <asm/msr.h>
19671+#include <asm/local.h>
19672
19673 #include "mce-internal.h"
19674
19675@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
19676 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
19677 m->cs, m->ip);
19678
19679- if (m->cs == __KERNEL_CS)
19680+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
19681 print_symbol("{%s}", m->ip);
19682 pr_cont("\n");
19683 }
19684@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
19685
19686 #define PANIC_TIMEOUT 5 /* 5 seconds */
19687
19688-static atomic_t mce_paniced;
19689+static atomic_unchecked_t mce_paniced;
19690
19691 static int fake_panic;
19692-static atomic_t mce_fake_paniced;
19693+static atomic_unchecked_t mce_fake_paniced;
19694
19695 /* Panic in progress. Enable interrupts and wait for final IPI */
19696 static void wait_for_panic(void)
19697@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19698 /*
19699 * Make sure only one CPU runs in machine check panic
19700 */
19701- if (atomic_inc_return(&mce_paniced) > 1)
19702+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
19703 wait_for_panic();
19704 barrier();
19705
19706@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19707 console_verbose();
19708 } else {
19709 /* Don't log too much for fake panic */
19710- if (atomic_inc_return(&mce_fake_paniced) > 1)
19711+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
19712 return;
19713 }
19714 /* First print corrected ones that are still unlogged */
19715@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19716 if (!fake_panic) {
19717 if (panic_timeout == 0)
19718 panic_timeout = mca_cfg.panic_timeout;
19719- panic(msg);
19720+ panic("%s", msg);
19721 } else
19722 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
19723 }
19724@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
19725 * might have been modified by someone else.
19726 */
19727 rmb();
19728- if (atomic_read(&mce_paniced))
19729+ if (atomic_read_unchecked(&mce_paniced))
19730 wait_for_panic();
19731 if (!mca_cfg.monarch_timeout)
19732 goto out;
19733@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
19734 }
19735
19736 /* Call the installed machine check handler for this CPU setup. */
19737-void (*machine_check_vector)(struct pt_regs *, long error_code) =
19738+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
19739 unexpected_machine_check;
19740
19741 /*
19742@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
19743 return;
19744 }
19745
19746+ pax_open_kernel();
19747 machine_check_vector = do_machine_check;
19748+ pax_close_kernel();
19749
19750 __mcheck_cpu_init_generic();
19751 __mcheck_cpu_init_vendor(c);
19752@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
19753 */
19754
19755 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
19756-static int mce_chrdev_open_count; /* #times opened */
19757+static local_t mce_chrdev_open_count; /* #times opened */
19758 static int mce_chrdev_open_exclu; /* already open exclusive? */
19759
19760 static int mce_chrdev_open(struct inode *inode, struct file *file)
19761@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19762 spin_lock(&mce_chrdev_state_lock);
19763
19764 if (mce_chrdev_open_exclu ||
19765- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
19766+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
19767 spin_unlock(&mce_chrdev_state_lock);
19768
19769 return -EBUSY;
19770@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19771
19772 if (file->f_flags & O_EXCL)
19773 mce_chrdev_open_exclu = 1;
19774- mce_chrdev_open_count++;
19775+ local_inc(&mce_chrdev_open_count);
19776
19777 spin_unlock(&mce_chrdev_state_lock);
19778
19779@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
19780 {
19781 spin_lock(&mce_chrdev_state_lock);
19782
19783- mce_chrdev_open_count--;
19784+ local_dec(&mce_chrdev_open_count);
19785 mce_chrdev_open_exclu = 0;
19786
19787 spin_unlock(&mce_chrdev_state_lock);
19788@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
19789 return NOTIFY_OK;
19790 }
19791
19792-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
19793+static struct notifier_block mce_cpu_notifier = {
19794 .notifier_call = mce_cpu_callback,
19795 };
19796
19797@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
19798
19799 for (i = 0; i < mca_cfg.banks; i++) {
19800 struct mce_bank *b = &mce_banks[i];
19801- struct device_attribute *a = &b->attr;
19802+ device_attribute_no_const *a = &b->attr;
19803
19804 sysfs_attr_init(&a->attr);
19805 a->attr.name = b->attrname;
19806@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
19807 static void mce_reset(void)
19808 {
19809 cpu_missing = 0;
19810- atomic_set(&mce_fake_paniced, 0);
19811+ atomic_set_unchecked(&mce_fake_paniced, 0);
19812 atomic_set(&mce_executing, 0);
19813 atomic_set(&mce_callin, 0);
19814 atomic_set(&global_nwo, 0);
19815diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
19816index 1c044b1..37a2a43 100644
19817--- a/arch/x86/kernel/cpu/mcheck/p5.c
19818+++ b/arch/x86/kernel/cpu/mcheck/p5.c
19819@@ -11,6 +11,7 @@
19820 #include <asm/processor.h>
19821 #include <asm/mce.h>
19822 #include <asm/msr.h>
19823+#include <asm/pgtable.h>
19824
19825 /* By default disabled */
19826 int mce_p5_enabled __read_mostly;
19827@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
19828 if (!cpu_has(c, X86_FEATURE_MCE))
19829 return;
19830
19831+ pax_open_kernel();
19832 machine_check_vector = pentium_machine_check;
19833+ pax_close_kernel();
19834 /* Make sure the vector pointer is visible before we enable MCEs: */
19835 wmb();
19836
19837diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19838index 47a1870..8c019a7 100644
19839--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
19840+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19841@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
19842 return notifier_from_errno(err);
19843 }
19844
19845-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
19846+static struct notifier_block thermal_throttle_cpu_notifier =
19847 {
19848 .notifier_call = thermal_throttle_cpu_callback,
19849 };
19850diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
19851index e9a701a..35317d6 100644
19852--- a/arch/x86/kernel/cpu/mcheck/winchip.c
19853+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
19854@@ -10,6 +10,7 @@
19855 #include <asm/processor.h>
19856 #include <asm/mce.h>
19857 #include <asm/msr.h>
19858+#include <asm/pgtable.h>
19859
19860 /* Machine check handler for WinChip C6: */
19861 static void winchip_machine_check(struct pt_regs *regs, long error_code)
19862@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
19863 {
19864 u32 lo, hi;
19865
19866+ pax_open_kernel();
19867 machine_check_vector = winchip_machine_check;
19868+ pax_close_kernel();
19869 /* Make sure the vector pointer is visible before we enable MCEs: */
19870 wmb();
19871
19872diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
19873index ca22b73..9987afe 100644
19874--- a/arch/x86/kernel/cpu/mtrr/main.c
19875+++ b/arch/x86/kernel/cpu/mtrr/main.c
19876@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
19877 u64 size_or_mask, size_and_mask;
19878 static bool mtrr_aps_delayed_init;
19879
19880-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
19881+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
19882
19883 const struct mtrr_ops *mtrr_if;
19884
19885diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
19886index df5e41f..816c719 100644
19887--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
19888+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
19889@@ -25,7 +25,7 @@ struct mtrr_ops {
19890 int (*validate_add_page)(unsigned long base, unsigned long size,
19891 unsigned int type);
19892 int (*have_wrcomb)(void);
19893-};
19894+} __do_const;
19895
19896 extern int generic_get_free_region(unsigned long base, unsigned long size,
19897 int replace_reg);
19898diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
19899index 1025f3c..824f677 100644
19900--- a/arch/x86/kernel/cpu/perf_event.c
19901+++ b/arch/x86/kernel/cpu/perf_event.c
19902@@ -1311,7 +1311,7 @@ static void __init pmu_check_apic(void)
19903 pr_info("no hardware sampling interrupt available.\n");
19904 }
19905
19906-static struct attribute_group x86_pmu_format_group = {
19907+static attribute_group_no_const x86_pmu_format_group = {
19908 .name = "format",
19909 .attrs = NULL,
19910 };
19911@@ -1410,7 +1410,7 @@ static struct attribute *events_attr[] = {
19912 NULL,
19913 };
19914
19915-static struct attribute_group x86_pmu_events_group = {
19916+static attribute_group_no_const x86_pmu_events_group = {
19917 .name = "events",
19918 .attrs = events_attr,
19919 };
19920@@ -1920,7 +1920,7 @@ static unsigned long get_segment_base(unsigned int segment)
19921 if (idx > GDT_ENTRIES)
19922 return 0;
19923
19924- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
19925+ desc = get_cpu_gdt_table(smp_processor_id());
19926 }
19927
19928 return get_desc_base(desc + idx);
19929@@ -2010,7 +2010,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
19930 break;
19931
19932 perf_callchain_store(entry, frame.return_address);
19933- fp = frame.next_frame;
19934+ fp = (const void __force_user *)frame.next_frame;
19935 }
19936 }
19937
19938diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
19939index a9e2207..d70c83a 100644
19940--- a/arch/x86/kernel/cpu/perf_event_intel.c
19941+++ b/arch/x86/kernel/cpu/perf_event_intel.c
19942@@ -2022,10 +2022,10 @@ __init int intel_pmu_init(void)
19943 * v2 and above have a perf capabilities MSR
19944 */
19945 if (version > 1) {
19946- u64 capabilities;
19947+ u64 capabilities = x86_pmu.intel_cap.capabilities;
19948
19949- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
19950- x86_pmu.intel_cap.capabilities = capabilities;
19951+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
19952+ x86_pmu.intel_cap.capabilities = capabilities;
19953 }
19954
19955 intel_ds_init();
19956diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19957index 8aac56b..588fb13 100644
19958--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19959+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19960@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
19961 static int __init uncore_type_init(struct intel_uncore_type *type)
19962 {
19963 struct intel_uncore_pmu *pmus;
19964- struct attribute_group *attr_group;
19965+ attribute_group_no_const *attr_group;
19966 struct attribute **attrs;
19967 int i, j;
19968
19969@@ -3518,7 +3518,7 @@ static int
19970 return NOTIFY_OK;
19971 }
19972
19973-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
19974+static struct notifier_block uncore_cpu_nb = {
19975 .notifier_call = uncore_cpu_notifier,
19976 /*
19977 * to migrate uncore events, our notifier should be executed
19978diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19979index f952891..4722ad4 100644
19980--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19981+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19982@@ -488,7 +488,7 @@ struct intel_uncore_box {
19983 struct uncore_event_desc {
19984 struct kobj_attribute attr;
19985 const char *config;
19986-};
19987+} __do_const;
19988
19989 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
19990 { \
19991diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
19992index 1e4dbcf..b9a34c2 100644
19993--- a/arch/x86/kernel/cpuid.c
19994+++ b/arch/x86/kernel/cpuid.c
19995@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
19996 return notifier_from_errno(err);
19997 }
19998
19999-static struct notifier_block __refdata cpuid_class_cpu_notifier =
20000+static struct notifier_block cpuid_class_cpu_notifier =
20001 {
20002 .notifier_call = cpuid_class_cpu_callback,
20003 };
20004diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
20005index 74467fe..18793d5 100644
20006--- a/arch/x86/kernel/crash.c
20007+++ b/arch/x86/kernel/crash.c
20008@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
20009 {
20010 #ifdef CONFIG_X86_32
20011 struct pt_regs fixed_regs;
20012-#endif
20013
20014-#ifdef CONFIG_X86_32
20015- if (!user_mode_vm(regs)) {
20016+ if (!user_mode(regs)) {
20017 crash_fixup_ss_esp(&fixed_regs, regs);
20018 regs = &fixed_regs;
20019 }
20020diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
20021index afa64ad..dce67dd 100644
20022--- a/arch/x86/kernel/crash_dump_64.c
20023+++ b/arch/x86/kernel/crash_dump_64.c
20024@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
20025 return -ENOMEM;
20026
20027 if (userbuf) {
20028- if (copy_to_user(buf, vaddr + offset, csize)) {
20029+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
20030 iounmap(vaddr);
20031 return -EFAULT;
20032 }
20033diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
20034index 155a13f..1672b9b 100644
20035--- a/arch/x86/kernel/doublefault_32.c
20036+++ b/arch/x86/kernel/doublefault_32.c
20037@@ -11,7 +11,7 @@
20038
20039 #define DOUBLEFAULT_STACKSIZE (1024)
20040 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
20041-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
20042+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
20043
20044 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
20045
20046@@ -21,7 +21,7 @@ static void doublefault_fn(void)
20047 unsigned long gdt, tss;
20048
20049 native_store_gdt(&gdt_desc);
20050- gdt = gdt_desc.address;
20051+ gdt = (unsigned long)gdt_desc.address;
20052
20053 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
20054
20055@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
20056 /* 0x2 bit is always set */
20057 .flags = X86_EFLAGS_SF | 0x2,
20058 .sp = STACK_START,
20059- .es = __USER_DS,
20060+ .es = __KERNEL_DS,
20061 .cs = __KERNEL_CS,
20062 .ss = __KERNEL_DS,
20063- .ds = __USER_DS,
20064+ .ds = __KERNEL_DS,
20065 .fs = __KERNEL_PERCPU,
20066
20067 .__cr3 = __pa_nodebug(swapper_pg_dir),
20068diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
20069index deb6421..76bbc12 100644
20070--- a/arch/x86/kernel/dumpstack.c
20071+++ b/arch/x86/kernel/dumpstack.c
20072@@ -2,6 +2,9 @@
20073 * Copyright (C) 1991, 1992 Linus Torvalds
20074 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
20075 */
20076+#ifdef CONFIG_GRKERNSEC_HIDESYM
20077+#define __INCLUDED_BY_HIDESYM 1
20078+#endif
20079 #include <linux/kallsyms.h>
20080 #include <linux/kprobes.h>
20081 #include <linux/uaccess.h>
20082@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
20083 static void
20084 print_ftrace_graph_addr(unsigned long addr, void *data,
20085 const struct stacktrace_ops *ops,
20086- struct thread_info *tinfo, int *graph)
20087+ struct task_struct *task, int *graph)
20088 {
20089- struct task_struct *task;
20090 unsigned long ret_addr;
20091 int index;
20092
20093 if (addr != (unsigned long)return_to_handler)
20094 return;
20095
20096- task = tinfo->task;
20097 index = task->curr_ret_stack;
20098
20099 if (!task->ret_stack || index < *graph)
20100@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20101 static inline void
20102 print_ftrace_graph_addr(unsigned long addr, void *data,
20103 const struct stacktrace_ops *ops,
20104- struct thread_info *tinfo, int *graph)
20105+ struct task_struct *task, int *graph)
20106 { }
20107 #endif
20108
20109@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20110 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
20111 */
20112
20113-static inline int valid_stack_ptr(struct thread_info *tinfo,
20114- void *p, unsigned int size, void *end)
20115+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
20116 {
20117- void *t = tinfo;
20118 if (end) {
20119 if (p < end && p >= (end-THREAD_SIZE))
20120 return 1;
20121@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
20122 }
20123
20124 unsigned long
20125-print_context_stack(struct thread_info *tinfo,
20126+print_context_stack(struct task_struct *task, void *stack_start,
20127 unsigned long *stack, unsigned long bp,
20128 const struct stacktrace_ops *ops, void *data,
20129 unsigned long *end, int *graph)
20130 {
20131 struct stack_frame *frame = (struct stack_frame *)bp;
20132
20133- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
20134+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
20135 unsigned long addr;
20136
20137 addr = *stack;
20138@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
20139 } else {
20140 ops->address(data, addr, 0);
20141 }
20142- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20143+ print_ftrace_graph_addr(addr, data, ops, task, graph);
20144 }
20145 stack++;
20146 }
20147@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
20148 EXPORT_SYMBOL_GPL(print_context_stack);
20149
20150 unsigned long
20151-print_context_stack_bp(struct thread_info *tinfo,
20152+print_context_stack_bp(struct task_struct *task, void *stack_start,
20153 unsigned long *stack, unsigned long bp,
20154 const struct stacktrace_ops *ops, void *data,
20155 unsigned long *end, int *graph)
20156@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20157 struct stack_frame *frame = (struct stack_frame *)bp;
20158 unsigned long *ret_addr = &frame->return_address;
20159
20160- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
20161+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
20162 unsigned long addr = *ret_addr;
20163
20164 if (!__kernel_text_address(addr))
20165@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20166 ops->address(data, addr, 1);
20167 frame = frame->next_frame;
20168 ret_addr = &frame->return_address;
20169- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20170+ print_ftrace_graph_addr(addr, data, ops, task, graph);
20171 }
20172
20173 return (unsigned long)frame;
20174@@ -150,7 +149,7 @@ static int print_trace_stack(void *data, char *name)
20175 static void print_trace_address(void *data, unsigned long addr, int reliable)
20176 {
20177 touch_nmi_watchdog();
20178- printk(data);
20179+ printk("%s", (char *)data);
20180 printk_address(addr, reliable);
20181 }
20182
20183@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
20184 }
20185 EXPORT_SYMBOL_GPL(oops_begin);
20186
20187+extern void gr_handle_kernel_exploit(void);
20188+
20189 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20190 {
20191 if (regs && kexec_should_crash(current))
20192@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20193 panic("Fatal exception in interrupt");
20194 if (panic_on_oops)
20195 panic("Fatal exception");
20196- do_exit(signr);
20197+
20198+ gr_handle_kernel_exploit();
20199+
20200+ do_group_exit(signr);
20201 }
20202
20203 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20204@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20205 print_modules();
20206 show_regs(regs);
20207 #ifdef CONFIG_X86_32
20208- if (user_mode_vm(regs)) {
20209+ if (user_mode(regs)) {
20210 sp = regs->sp;
20211 ss = regs->ss & 0xffff;
20212 } else {
20213@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
20214 unsigned long flags = oops_begin();
20215 int sig = SIGSEGV;
20216
20217- if (!user_mode_vm(regs))
20218+ if (!user_mode(regs))
20219 report_bug(regs->ip, regs);
20220
20221 if (__die(str, regs, err))
20222diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
20223index f2a1770..540657f 100644
20224--- a/arch/x86/kernel/dumpstack_32.c
20225+++ b/arch/x86/kernel/dumpstack_32.c
20226@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20227 bp = stack_frame(task, regs);
20228
20229 for (;;) {
20230- struct thread_info *context;
20231+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
20232
20233- context = (struct thread_info *)
20234- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
20235- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
20236+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
20237
20238- stack = (unsigned long *)context->previous_esp;
20239- if (!stack)
20240+ if (stack_start == task_stack_page(task))
20241 break;
20242+ stack = *(unsigned long **)stack_start;
20243 if (ops->stack(data, "IRQ") < 0)
20244 break;
20245 touch_nmi_watchdog();
20246@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
20247 int i;
20248
20249 show_regs_print_info(KERN_EMERG);
20250- __show_regs(regs, !user_mode_vm(regs));
20251+ __show_regs(regs, !user_mode(regs));
20252
20253 /*
20254 * When in-kernel, we also print out the stack and code at the
20255 * time of the fault..
20256 */
20257- if (!user_mode_vm(regs)) {
20258+ if (!user_mode(regs)) {
20259 unsigned int code_prologue = code_bytes * 43 / 64;
20260 unsigned int code_len = code_bytes;
20261 unsigned char c;
20262 u8 *ip;
20263+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
20264
20265 pr_emerg("Stack:\n");
20266 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
20267
20268 pr_emerg("Code:");
20269
20270- ip = (u8 *)regs->ip - code_prologue;
20271+ ip = (u8 *)regs->ip - code_prologue + cs_base;
20272 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
20273 /* try starting at IP */
20274- ip = (u8 *)regs->ip;
20275+ ip = (u8 *)regs->ip + cs_base;
20276 code_len = code_len - code_prologue + 1;
20277 }
20278 for (i = 0; i < code_len; i++, ip++) {
20279@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
20280 pr_cont(" Bad EIP value.");
20281 break;
20282 }
20283- if (ip == (u8 *)regs->ip)
20284+ if (ip == (u8 *)regs->ip + cs_base)
20285 pr_cont(" <%02x>", c);
20286 else
20287 pr_cont(" %02x", c);
20288@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
20289 {
20290 unsigned short ud2;
20291
20292+ ip = ktla_ktva(ip);
20293 if (ip < PAGE_OFFSET)
20294 return 0;
20295 if (probe_kernel_address((unsigned short *)ip, ud2))
20296@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
20297
20298 return ud2 == 0x0b0f;
20299 }
20300+
20301+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20302+void pax_check_alloca(unsigned long size)
20303+{
20304+ unsigned long sp = (unsigned long)&sp, stack_left;
20305+
20306+ /* all kernel stacks are of the same size */
20307+ stack_left = sp & (THREAD_SIZE - 1);
20308+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20309+}
20310+EXPORT_SYMBOL(pax_check_alloca);
20311+#endif
20312diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
20313index addb207..99635fa 100644
20314--- a/arch/x86/kernel/dumpstack_64.c
20315+++ b/arch/x86/kernel/dumpstack_64.c
20316@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20317 unsigned long *irq_stack_end =
20318 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
20319 unsigned used = 0;
20320- struct thread_info *tinfo;
20321 int graph = 0;
20322 unsigned long dummy;
20323+ void *stack_start;
20324
20325 if (!task)
20326 task = current;
20327@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20328 * current stack address. If the stacks consist of nested
20329 * exceptions
20330 */
20331- tinfo = task_thread_info(task);
20332 for (;;) {
20333 char *id;
20334 unsigned long *estack_end;
20335+
20336 estack_end = in_exception_stack(cpu, (unsigned long)stack,
20337 &used, &id);
20338
20339@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20340 if (ops->stack(data, id) < 0)
20341 break;
20342
20343- bp = ops->walk_stack(tinfo, stack, bp, ops,
20344+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
20345 data, estack_end, &graph);
20346 ops->stack(data, "<EOE>");
20347 /*
20348@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20349 * second-to-last pointer (index -2 to end) in the
20350 * exception stack:
20351 */
20352+ if ((u16)estack_end[-1] != __KERNEL_DS)
20353+ goto out;
20354 stack = (unsigned long *) estack_end[-2];
20355 continue;
20356 }
20357@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20358 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
20359 if (ops->stack(data, "IRQ") < 0)
20360 break;
20361- bp = ops->walk_stack(tinfo, stack, bp,
20362+ bp = ops->walk_stack(task, irq_stack, stack, bp,
20363 ops, data, irq_stack_end, &graph);
20364 /*
20365 * We link to the next stack (which would be
20366@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20367 /*
20368 * This handles the process stack:
20369 */
20370- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
20371+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
20372+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
20373+out:
20374 put_cpu();
20375 }
20376 EXPORT_SYMBOL(dump_trace);
20377@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
20378
20379 return ud2 == 0x0b0f;
20380 }
20381+
20382+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20383+void pax_check_alloca(unsigned long size)
20384+{
20385+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
20386+ unsigned cpu, used;
20387+ char *id;
20388+
20389+ /* check the process stack first */
20390+ stack_start = (unsigned long)task_stack_page(current);
20391+ stack_end = stack_start + THREAD_SIZE;
20392+ if (likely(stack_start <= sp && sp < stack_end)) {
20393+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
20394+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20395+ return;
20396+ }
20397+
20398+ cpu = get_cpu();
20399+
20400+ /* check the irq stacks */
20401+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
20402+ stack_start = stack_end - IRQ_STACK_SIZE;
20403+ if (stack_start <= sp && sp < stack_end) {
20404+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
20405+ put_cpu();
20406+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20407+ return;
20408+ }
20409+
20410+ /* check the exception stacks */
20411+ used = 0;
20412+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
20413+ stack_start = stack_end - EXCEPTION_STKSZ;
20414+ if (stack_end && stack_start <= sp && sp < stack_end) {
20415+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
20416+ put_cpu();
20417+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20418+ return;
20419+ }
20420+
20421+ put_cpu();
20422+
20423+ /* unknown stack */
20424+ BUG();
20425+}
20426+EXPORT_SYMBOL(pax_check_alloca);
20427+#endif
20428diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
20429index d32abea..74daf4f 100644
20430--- a/arch/x86/kernel/e820.c
20431+++ b/arch/x86/kernel/e820.c
20432@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
20433
20434 static void early_panic(char *msg)
20435 {
20436- early_printk(msg);
20437- panic(msg);
20438+ early_printk("%s", msg);
20439+ panic("%s", msg);
20440 }
20441
20442 static int userdef __initdata;
20443diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
20444index d15f575..d692043 100644
20445--- a/arch/x86/kernel/early_printk.c
20446+++ b/arch/x86/kernel/early_printk.c
20447@@ -7,6 +7,7 @@
20448 #include <linux/pci_regs.h>
20449 #include <linux/pci_ids.h>
20450 #include <linux/errno.h>
20451+#include <linux/sched.h>
20452 #include <asm/io.h>
20453 #include <asm/processor.h>
20454 #include <asm/fcntl.h>
20455diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
20456index 8f3e2de..6b71e39 100644
20457--- a/arch/x86/kernel/entry_32.S
20458+++ b/arch/x86/kernel/entry_32.S
20459@@ -177,13 +177,153 @@
20460 /*CFI_REL_OFFSET gs, PT_GS*/
20461 .endm
20462 .macro SET_KERNEL_GS reg
20463+
20464+#ifdef CONFIG_CC_STACKPROTECTOR
20465 movl $(__KERNEL_STACK_CANARY), \reg
20466+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20467+ movl $(__USER_DS), \reg
20468+#else
20469+ xorl \reg, \reg
20470+#endif
20471+
20472 movl \reg, %gs
20473 .endm
20474
20475 #endif /* CONFIG_X86_32_LAZY_GS */
20476
20477-.macro SAVE_ALL
20478+.macro pax_enter_kernel
20479+#ifdef CONFIG_PAX_KERNEXEC
20480+ call pax_enter_kernel
20481+#endif
20482+.endm
20483+
20484+.macro pax_exit_kernel
20485+#ifdef CONFIG_PAX_KERNEXEC
20486+ call pax_exit_kernel
20487+#endif
20488+.endm
20489+
20490+#ifdef CONFIG_PAX_KERNEXEC
20491+ENTRY(pax_enter_kernel)
20492+#ifdef CONFIG_PARAVIRT
20493+ pushl %eax
20494+ pushl %ecx
20495+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
20496+ mov %eax, %esi
20497+#else
20498+ mov %cr0, %esi
20499+#endif
20500+ bts $16, %esi
20501+ jnc 1f
20502+ mov %cs, %esi
20503+ cmp $__KERNEL_CS, %esi
20504+ jz 3f
20505+ ljmp $__KERNEL_CS, $3f
20506+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
20507+2:
20508+#ifdef CONFIG_PARAVIRT
20509+ mov %esi, %eax
20510+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
20511+#else
20512+ mov %esi, %cr0
20513+#endif
20514+3:
20515+#ifdef CONFIG_PARAVIRT
20516+ popl %ecx
20517+ popl %eax
20518+#endif
20519+ ret
20520+ENDPROC(pax_enter_kernel)
20521+
20522+ENTRY(pax_exit_kernel)
20523+#ifdef CONFIG_PARAVIRT
20524+ pushl %eax
20525+ pushl %ecx
20526+#endif
20527+ mov %cs, %esi
20528+ cmp $__KERNEXEC_KERNEL_CS, %esi
20529+ jnz 2f
20530+#ifdef CONFIG_PARAVIRT
20531+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
20532+ mov %eax, %esi
20533+#else
20534+ mov %cr0, %esi
20535+#endif
20536+ btr $16, %esi
20537+ ljmp $__KERNEL_CS, $1f
20538+1:
20539+#ifdef CONFIG_PARAVIRT
20540+ mov %esi, %eax
20541+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
20542+#else
20543+ mov %esi, %cr0
20544+#endif
20545+2:
20546+#ifdef CONFIG_PARAVIRT
20547+ popl %ecx
20548+ popl %eax
20549+#endif
20550+ ret
20551+ENDPROC(pax_exit_kernel)
20552+#endif
20553+
20554+ .macro pax_erase_kstack
20555+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20556+ call pax_erase_kstack
20557+#endif
20558+ .endm
20559+
20560+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20561+/*
20562+ * ebp: thread_info
20563+ */
20564+ENTRY(pax_erase_kstack)
20565+ pushl %edi
20566+ pushl %ecx
20567+ pushl %eax
20568+
20569+ mov TI_lowest_stack(%ebp), %edi
20570+ mov $-0xBEEF, %eax
20571+ std
20572+
20573+1: mov %edi, %ecx
20574+ and $THREAD_SIZE_asm - 1, %ecx
20575+ shr $2, %ecx
20576+ repne scasl
20577+ jecxz 2f
20578+
20579+ cmp $2*16, %ecx
20580+ jc 2f
20581+
20582+ mov $2*16, %ecx
20583+ repe scasl
20584+ jecxz 2f
20585+ jne 1b
20586+
20587+2: cld
20588+ mov %esp, %ecx
20589+ sub %edi, %ecx
20590+
20591+ cmp $THREAD_SIZE_asm, %ecx
20592+ jb 3f
20593+ ud2
20594+3:
20595+
20596+ shr $2, %ecx
20597+ rep stosl
20598+
20599+ mov TI_task_thread_sp0(%ebp), %edi
20600+ sub $128, %edi
20601+ mov %edi, TI_lowest_stack(%ebp)
20602+
20603+ popl %eax
20604+ popl %ecx
20605+ popl %edi
20606+ ret
20607+ENDPROC(pax_erase_kstack)
20608+#endif
20609+
20610+.macro __SAVE_ALL _DS
20611 cld
20612 PUSH_GS
20613 pushl_cfi %fs
20614@@ -206,7 +346,7 @@
20615 CFI_REL_OFFSET ecx, 0
20616 pushl_cfi %ebx
20617 CFI_REL_OFFSET ebx, 0
20618- movl $(__USER_DS), %edx
20619+ movl $\_DS, %edx
20620 movl %edx, %ds
20621 movl %edx, %es
20622 movl $(__KERNEL_PERCPU), %edx
20623@@ -214,6 +354,15 @@
20624 SET_KERNEL_GS %edx
20625 .endm
20626
20627+.macro SAVE_ALL
20628+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20629+ __SAVE_ALL __KERNEL_DS
20630+ pax_enter_kernel
20631+#else
20632+ __SAVE_ALL __USER_DS
20633+#endif
20634+.endm
20635+
20636 .macro RESTORE_INT_REGS
20637 popl_cfi %ebx
20638 CFI_RESTORE ebx
20639@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
20640 popfl_cfi
20641 jmp syscall_exit
20642 CFI_ENDPROC
20643-END(ret_from_fork)
20644+ENDPROC(ret_from_fork)
20645
20646 ENTRY(ret_from_kernel_thread)
20647 CFI_STARTPROC
20648@@ -344,7 +493,15 @@ ret_from_intr:
20649 andl $SEGMENT_RPL_MASK, %eax
20650 #endif
20651 cmpl $USER_RPL, %eax
20652+
20653+#ifdef CONFIG_PAX_KERNEXEC
20654+ jae resume_userspace
20655+
20656+ pax_exit_kernel
20657+ jmp resume_kernel
20658+#else
20659 jb resume_kernel # not returning to v8086 or userspace
20660+#endif
20661
20662 ENTRY(resume_userspace)
20663 LOCKDEP_SYS_EXIT
20664@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
20665 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
20666 # int/exception return?
20667 jne work_pending
20668- jmp restore_all
20669-END(ret_from_exception)
20670+ jmp restore_all_pax
20671+ENDPROC(ret_from_exception)
20672
20673 #ifdef CONFIG_PREEMPT
20674 ENTRY(resume_kernel)
20675@@ -372,7 +529,7 @@ need_resched:
20676 jz restore_all
20677 call preempt_schedule_irq
20678 jmp need_resched
20679-END(resume_kernel)
20680+ENDPROC(resume_kernel)
20681 #endif
20682 CFI_ENDPROC
20683 /*
20684@@ -406,30 +563,45 @@ sysenter_past_esp:
20685 /*CFI_REL_OFFSET cs, 0*/
20686 /*
20687 * Push current_thread_info()->sysenter_return to the stack.
20688- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
20689- * pushed above; +8 corresponds to copy_thread's esp0 setting.
20690 */
20691- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
20692+ pushl_cfi $0
20693 CFI_REL_OFFSET eip, 0
20694
20695 pushl_cfi %eax
20696 SAVE_ALL
20697+ GET_THREAD_INFO(%ebp)
20698+ movl TI_sysenter_return(%ebp),%ebp
20699+ movl %ebp,PT_EIP(%esp)
20700 ENABLE_INTERRUPTS(CLBR_NONE)
20701
20702 /*
20703 * Load the potential sixth argument from user stack.
20704 * Careful about security.
20705 */
20706+ movl PT_OLDESP(%esp),%ebp
20707+
20708+#ifdef CONFIG_PAX_MEMORY_UDEREF
20709+ mov PT_OLDSS(%esp),%ds
20710+1: movl %ds:(%ebp),%ebp
20711+ push %ss
20712+ pop %ds
20713+#else
20714 cmpl $__PAGE_OFFSET-3,%ebp
20715 jae syscall_fault
20716 ASM_STAC
20717 1: movl (%ebp),%ebp
20718 ASM_CLAC
20719+#endif
20720+
20721 movl %ebp,PT_EBP(%esp)
20722 _ASM_EXTABLE(1b,syscall_fault)
20723
20724 GET_THREAD_INFO(%ebp)
20725
20726+#ifdef CONFIG_PAX_RANDKSTACK
20727+ pax_erase_kstack
20728+#endif
20729+
20730 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
20731 jnz sysenter_audit
20732 sysenter_do_call:
20733@@ -444,12 +616,24 @@ sysenter_do_call:
20734 testl $_TIF_ALLWORK_MASK, %ecx
20735 jne sysexit_audit
20736 sysenter_exit:
20737+
20738+#ifdef CONFIG_PAX_RANDKSTACK
20739+ pushl_cfi %eax
20740+ movl %esp, %eax
20741+ call pax_randomize_kstack
20742+ popl_cfi %eax
20743+#endif
20744+
20745+ pax_erase_kstack
20746+
20747 /* if something modifies registers it must also disable sysexit */
20748 movl PT_EIP(%esp), %edx
20749 movl PT_OLDESP(%esp), %ecx
20750 xorl %ebp,%ebp
20751 TRACE_IRQS_ON
20752 1: mov PT_FS(%esp), %fs
20753+2: mov PT_DS(%esp), %ds
20754+3: mov PT_ES(%esp), %es
20755 PTGS_TO_GS
20756 ENABLE_INTERRUPTS_SYSEXIT
20757
20758@@ -466,6 +650,9 @@ sysenter_audit:
20759 movl %eax,%edx /* 2nd arg: syscall number */
20760 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
20761 call __audit_syscall_entry
20762+
20763+ pax_erase_kstack
20764+
20765 pushl_cfi %ebx
20766 movl PT_EAX(%esp),%eax /* reload syscall number */
20767 jmp sysenter_do_call
20768@@ -491,10 +678,16 @@ sysexit_audit:
20769
20770 CFI_ENDPROC
20771 .pushsection .fixup,"ax"
20772-2: movl $0,PT_FS(%esp)
20773+4: movl $0,PT_FS(%esp)
20774+ jmp 1b
20775+5: movl $0,PT_DS(%esp)
20776+ jmp 1b
20777+6: movl $0,PT_ES(%esp)
20778 jmp 1b
20779 .popsection
20780- _ASM_EXTABLE(1b,2b)
20781+ _ASM_EXTABLE(1b,4b)
20782+ _ASM_EXTABLE(2b,5b)
20783+ _ASM_EXTABLE(3b,6b)
20784 PTGS_TO_GS_EX
20785 ENDPROC(ia32_sysenter_target)
20786
20787@@ -509,6 +702,11 @@ ENTRY(system_call)
20788 pushl_cfi %eax # save orig_eax
20789 SAVE_ALL
20790 GET_THREAD_INFO(%ebp)
20791+
20792+#ifdef CONFIG_PAX_RANDKSTACK
20793+ pax_erase_kstack
20794+#endif
20795+
20796 # system call tracing in operation / emulation
20797 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
20798 jnz syscall_trace_entry
20799@@ -527,6 +725,15 @@ syscall_exit:
20800 testl $_TIF_ALLWORK_MASK, %ecx # current->work
20801 jne syscall_exit_work
20802
20803+restore_all_pax:
20804+
20805+#ifdef CONFIG_PAX_RANDKSTACK
20806+ movl %esp, %eax
20807+ call pax_randomize_kstack
20808+#endif
20809+
20810+ pax_erase_kstack
20811+
20812 restore_all:
20813 TRACE_IRQS_IRET
20814 restore_all_notrace:
20815@@ -583,14 +790,34 @@ ldt_ss:
20816 * compensating for the offset by changing to the ESPFIX segment with
20817 * a base address that matches for the difference.
20818 */
20819-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
20820+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
20821 mov %esp, %edx /* load kernel esp */
20822 mov PT_OLDESP(%esp), %eax /* load userspace esp */
20823 mov %dx, %ax /* eax: new kernel esp */
20824 sub %eax, %edx /* offset (low word is 0) */
20825+#ifdef CONFIG_SMP
20826+ movl PER_CPU_VAR(cpu_number), %ebx
20827+ shll $PAGE_SHIFT_asm, %ebx
20828+ addl $cpu_gdt_table, %ebx
20829+#else
20830+ movl $cpu_gdt_table, %ebx
20831+#endif
20832 shr $16, %edx
20833- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
20834- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
20835+
20836+#ifdef CONFIG_PAX_KERNEXEC
20837+ mov %cr0, %esi
20838+ btr $16, %esi
20839+ mov %esi, %cr0
20840+#endif
20841+
20842+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
20843+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
20844+
20845+#ifdef CONFIG_PAX_KERNEXEC
20846+ bts $16, %esi
20847+ mov %esi, %cr0
20848+#endif
20849+
20850 pushl_cfi $__ESPFIX_SS
20851 pushl_cfi %eax /* new kernel esp */
20852 /* Disable interrupts, but do not irqtrace this section: we
20853@@ -619,20 +846,18 @@ work_resched:
20854 movl TI_flags(%ebp), %ecx
20855 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
20856 # than syscall tracing?
20857- jz restore_all
20858+ jz restore_all_pax
20859 testb $_TIF_NEED_RESCHED, %cl
20860 jnz work_resched
20861
20862 work_notifysig: # deal with pending signals and
20863 # notify-resume requests
20864+ movl %esp, %eax
20865 #ifdef CONFIG_VM86
20866 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
20867- movl %esp, %eax
20868 jne work_notifysig_v86 # returning to kernel-space or
20869 # vm86-space
20870 1:
20871-#else
20872- movl %esp, %eax
20873 #endif
20874 TRACE_IRQS_ON
20875 ENABLE_INTERRUPTS(CLBR_NONE)
20876@@ -653,7 +878,7 @@ work_notifysig_v86:
20877 movl %eax, %esp
20878 jmp 1b
20879 #endif
20880-END(work_pending)
20881+ENDPROC(work_pending)
20882
20883 # perform syscall exit tracing
20884 ALIGN
20885@@ -661,11 +886,14 @@ syscall_trace_entry:
20886 movl $-ENOSYS,PT_EAX(%esp)
20887 movl %esp, %eax
20888 call syscall_trace_enter
20889+
20890+ pax_erase_kstack
20891+
20892 /* What it returned is what we'll actually use. */
20893 cmpl $(NR_syscalls), %eax
20894 jnae syscall_call
20895 jmp syscall_exit
20896-END(syscall_trace_entry)
20897+ENDPROC(syscall_trace_entry)
20898
20899 # perform syscall exit tracing
20900 ALIGN
20901@@ -678,21 +906,25 @@ syscall_exit_work:
20902 movl %esp, %eax
20903 call syscall_trace_leave
20904 jmp resume_userspace
20905-END(syscall_exit_work)
20906+ENDPROC(syscall_exit_work)
20907 CFI_ENDPROC
20908
20909 RING0_INT_FRAME # can't unwind into user space anyway
20910 syscall_fault:
20911+#ifdef CONFIG_PAX_MEMORY_UDEREF
20912+ push %ss
20913+ pop %ds
20914+#endif
20915 ASM_CLAC
20916 GET_THREAD_INFO(%ebp)
20917 movl $-EFAULT,PT_EAX(%esp)
20918 jmp resume_userspace
20919-END(syscall_fault)
20920+ENDPROC(syscall_fault)
20921
20922 syscall_badsys:
20923 movl $-ENOSYS,PT_EAX(%esp)
20924 jmp resume_userspace
20925-END(syscall_badsys)
20926+ENDPROC(syscall_badsys)
20927 CFI_ENDPROC
20928 /*
20929 * End of kprobes section
20930@@ -708,8 +940,15 @@ END(syscall_badsys)
20931 * normal stack and adjusts ESP with the matching offset.
20932 */
20933 /* fixup the stack */
20934- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
20935- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
20936+#ifdef CONFIG_SMP
20937+ movl PER_CPU_VAR(cpu_number), %ebx
20938+ shll $PAGE_SHIFT_asm, %ebx
20939+ addl $cpu_gdt_table, %ebx
20940+#else
20941+ movl $cpu_gdt_table, %ebx
20942+#endif
20943+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
20944+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
20945 shl $16, %eax
20946 addl %esp, %eax /* the adjusted stack pointer */
20947 pushl_cfi $__KERNEL_DS
20948@@ -762,7 +1001,7 @@ vector=vector+1
20949 .endr
20950 2: jmp common_interrupt
20951 .endr
20952-END(irq_entries_start)
20953+ENDPROC(irq_entries_start)
20954
20955 .previous
20956 END(interrupt)
20957@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
20958 pushl_cfi $do_coprocessor_error
20959 jmp error_code
20960 CFI_ENDPROC
20961-END(coprocessor_error)
20962+ENDPROC(coprocessor_error)
20963
20964 ENTRY(simd_coprocessor_error)
20965 RING0_INT_FRAME
20966@@ -826,7 +1065,7 @@ ENTRY(simd_coprocessor_error)
20967 .section .altinstructions,"a"
20968 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
20969 .previous
20970-.section .altinstr_replacement,"ax"
20971+.section .altinstr_replacement,"a"
20972 663: pushl $do_simd_coprocessor_error
20973 664:
20974 .previous
20975@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
20976 #endif
20977 jmp error_code
20978 CFI_ENDPROC
20979-END(simd_coprocessor_error)
20980+ENDPROC(simd_coprocessor_error)
20981
20982 ENTRY(device_not_available)
20983 RING0_INT_FRAME
20984@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
20985 pushl_cfi $do_device_not_available
20986 jmp error_code
20987 CFI_ENDPROC
20988-END(device_not_available)
20989+ENDPROC(device_not_available)
20990
20991 #ifdef CONFIG_PARAVIRT
20992 ENTRY(native_iret)
20993 iret
20994 _ASM_EXTABLE(native_iret, iret_exc)
20995-END(native_iret)
20996+ENDPROC(native_iret)
20997
20998 ENTRY(native_irq_enable_sysexit)
20999 sti
21000 sysexit
21001-END(native_irq_enable_sysexit)
21002+ENDPROC(native_irq_enable_sysexit)
21003 #endif
21004
21005 ENTRY(overflow)
21006@@ -865,7 +1104,7 @@ ENTRY(overflow)
21007 pushl_cfi $do_overflow
21008 jmp error_code
21009 CFI_ENDPROC
21010-END(overflow)
21011+ENDPROC(overflow)
21012
21013 ENTRY(bounds)
21014 RING0_INT_FRAME
21015@@ -874,7 +1113,7 @@ ENTRY(bounds)
21016 pushl_cfi $do_bounds
21017 jmp error_code
21018 CFI_ENDPROC
21019-END(bounds)
21020+ENDPROC(bounds)
21021
21022 ENTRY(invalid_op)
21023 RING0_INT_FRAME
21024@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
21025 pushl_cfi $do_invalid_op
21026 jmp error_code
21027 CFI_ENDPROC
21028-END(invalid_op)
21029+ENDPROC(invalid_op)
21030
21031 ENTRY(coprocessor_segment_overrun)
21032 RING0_INT_FRAME
21033@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
21034 pushl_cfi $do_coprocessor_segment_overrun
21035 jmp error_code
21036 CFI_ENDPROC
21037-END(coprocessor_segment_overrun)
21038+ENDPROC(coprocessor_segment_overrun)
21039
21040 ENTRY(invalid_TSS)
21041 RING0_EC_FRAME
21042@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
21043 pushl_cfi $do_invalid_TSS
21044 jmp error_code
21045 CFI_ENDPROC
21046-END(invalid_TSS)
21047+ENDPROC(invalid_TSS)
21048
21049 ENTRY(segment_not_present)
21050 RING0_EC_FRAME
21051@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
21052 pushl_cfi $do_segment_not_present
21053 jmp error_code
21054 CFI_ENDPROC
21055-END(segment_not_present)
21056+ENDPROC(segment_not_present)
21057
21058 ENTRY(stack_segment)
21059 RING0_EC_FRAME
21060@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
21061 pushl_cfi $do_stack_segment
21062 jmp error_code
21063 CFI_ENDPROC
21064-END(stack_segment)
21065+ENDPROC(stack_segment)
21066
21067 ENTRY(alignment_check)
21068 RING0_EC_FRAME
21069@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
21070 pushl_cfi $do_alignment_check
21071 jmp error_code
21072 CFI_ENDPROC
21073-END(alignment_check)
21074+ENDPROC(alignment_check)
21075
21076 ENTRY(divide_error)
21077 RING0_INT_FRAME
21078@@ -933,7 +1172,7 @@ ENTRY(divide_error)
21079 pushl_cfi $do_divide_error
21080 jmp error_code
21081 CFI_ENDPROC
21082-END(divide_error)
21083+ENDPROC(divide_error)
21084
21085 #ifdef CONFIG_X86_MCE
21086 ENTRY(machine_check)
21087@@ -943,7 +1182,7 @@ ENTRY(machine_check)
21088 pushl_cfi machine_check_vector
21089 jmp error_code
21090 CFI_ENDPROC
21091-END(machine_check)
21092+ENDPROC(machine_check)
21093 #endif
21094
21095 ENTRY(spurious_interrupt_bug)
21096@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
21097 pushl_cfi $do_spurious_interrupt_bug
21098 jmp error_code
21099 CFI_ENDPROC
21100-END(spurious_interrupt_bug)
21101+ENDPROC(spurious_interrupt_bug)
21102 /*
21103 * End of kprobes section
21104 */
21105@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
21106
21107 ENTRY(mcount)
21108 ret
21109-END(mcount)
21110+ENDPROC(mcount)
21111
21112 ENTRY(ftrace_caller)
21113 cmpl $0, function_trace_stop
21114@@ -1096,7 +1335,7 @@ ftrace_graph_call:
21115 .globl ftrace_stub
21116 ftrace_stub:
21117 ret
21118-END(ftrace_caller)
21119+ENDPROC(ftrace_caller)
21120
21121 ENTRY(ftrace_regs_caller)
21122 pushf /* push flags before compare (in cs location) */
21123@@ -1197,7 +1436,7 @@ trace:
21124 popl %ecx
21125 popl %eax
21126 jmp ftrace_stub
21127-END(mcount)
21128+ENDPROC(mcount)
21129 #endif /* CONFIG_DYNAMIC_FTRACE */
21130 #endif /* CONFIG_FUNCTION_TRACER */
21131
21132@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
21133 popl %ecx
21134 popl %eax
21135 ret
21136-END(ftrace_graph_caller)
21137+ENDPROC(ftrace_graph_caller)
21138
21139 .globl return_to_handler
21140 return_to_handler:
21141@@ -1271,15 +1510,18 @@ error_code:
21142 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
21143 REG_TO_PTGS %ecx
21144 SET_KERNEL_GS %ecx
21145- movl $(__USER_DS), %ecx
21146+ movl $(__KERNEL_DS), %ecx
21147 movl %ecx, %ds
21148 movl %ecx, %es
21149+
21150+ pax_enter_kernel
21151+
21152 TRACE_IRQS_OFF
21153 movl %esp,%eax # pt_regs pointer
21154 call *%edi
21155 jmp ret_from_exception
21156 CFI_ENDPROC
21157-END(page_fault)
21158+ENDPROC(page_fault)
21159
21160 /*
21161 * Debug traps and NMI can happen at the one SYSENTER instruction
21162@@ -1322,7 +1564,7 @@ debug_stack_correct:
21163 call do_debug
21164 jmp ret_from_exception
21165 CFI_ENDPROC
21166-END(debug)
21167+ENDPROC(debug)
21168
21169 /*
21170 * NMI is doubly nasty. It can happen _while_ we're handling
21171@@ -1360,6 +1602,9 @@ nmi_stack_correct:
21172 xorl %edx,%edx # zero error code
21173 movl %esp,%eax # pt_regs pointer
21174 call do_nmi
21175+
21176+ pax_exit_kernel
21177+
21178 jmp restore_all_notrace
21179 CFI_ENDPROC
21180
21181@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
21182 FIXUP_ESPFIX_STACK # %eax == %esp
21183 xorl %edx,%edx # zero error code
21184 call do_nmi
21185+
21186+ pax_exit_kernel
21187+
21188 RESTORE_REGS
21189 lss 12+4(%esp), %esp # back to espfix stack
21190 CFI_ADJUST_CFA_OFFSET -24
21191 jmp irq_return
21192 CFI_ENDPROC
21193-END(nmi)
21194+ENDPROC(nmi)
21195
21196 ENTRY(int3)
21197 RING0_INT_FRAME
21198@@ -1414,14 +1662,14 @@ ENTRY(int3)
21199 call do_int3
21200 jmp ret_from_exception
21201 CFI_ENDPROC
21202-END(int3)
21203+ENDPROC(int3)
21204
21205 ENTRY(general_protection)
21206 RING0_EC_FRAME
21207 pushl_cfi $do_general_protection
21208 jmp error_code
21209 CFI_ENDPROC
21210-END(general_protection)
21211+ENDPROC(general_protection)
21212
21213 #ifdef CONFIG_KVM_GUEST
21214 ENTRY(async_page_fault)
21215@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
21216 pushl_cfi $do_async_page_fault
21217 jmp error_code
21218 CFI_ENDPROC
21219-END(async_page_fault)
21220+ENDPROC(async_page_fault)
21221 #endif
21222
21223 /*
21224diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
21225index 7272089..0b74104 100644
21226--- a/arch/x86/kernel/entry_64.S
21227+++ b/arch/x86/kernel/entry_64.S
21228@@ -59,6 +59,8 @@
21229 #include <asm/context_tracking.h>
21230 #include <asm/smap.h>
21231 #include <linux/err.h>
21232+#include <asm/pgtable.h>
21233+#include <asm/alternative-asm.h>
21234
21235 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
21236 #include <linux/elf-em.h>
21237@@ -80,8 +82,9 @@
21238 #ifdef CONFIG_DYNAMIC_FTRACE
21239
21240 ENTRY(function_hook)
21241+ pax_force_retaddr
21242 retq
21243-END(function_hook)
21244+ENDPROC(function_hook)
21245
21246 /* skip is set if stack has been adjusted */
21247 .macro ftrace_caller_setup skip=0
21248@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
21249 #endif
21250
21251 GLOBAL(ftrace_stub)
21252+ pax_force_retaddr
21253 retq
21254-END(ftrace_caller)
21255+ENDPROC(ftrace_caller)
21256
21257 ENTRY(ftrace_regs_caller)
21258 /* Save the current flags before compare (in SS location)*/
21259@@ -191,7 +195,7 @@ ftrace_restore_flags:
21260 popfq
21261 jmp ftrace_stub
21262
21263-END(ftrace_regs_caller)
21264+ENDPROC(ftrace_regs_caller)
21265
21266
21267 #else /* ! CONFIG_DYNAMIC_FTRACE */
21268@@ -212,6 +216,7 @@ ENTRY(function_hook)
21269 #endif
21270
21271 GLOBAL(ftrace_stub)
21272+ pax_force_retaddr
21273 retq
21274
21275 trace:
21276@@ -225,12 +230,13 @@ trace:
21277 #endif
21278 subq $MCOUNT_INSN_SIZE, %rdi
21279
21280+ pax_force_fptr ftrace_trace_function
21281 call *ftrace_trace_function
21282
21283 MCOUNT_RESTORE_FRAME
21284
21285 jmp ftrace_stub
21286-END(function_hook)
21287+ENDPROC(function_hook)
21288 #endif /* CONFIG_DYNAMIC_FTRACE */
21289 #endif /* CONFIG_FUNCTION_TRACER */
21290
21291@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
21292
21293 MCOUNT_RESTORE_FRAME
21294
21295+ pax_force_retaddr
21296 retq
21297-END(ftrace_graph_caller)
21298+ENDPROC(ftrace_graph_caller)
21299
21300 GLOBAL(return_to_handler)
21301 subq $24, %rsp
21302@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
21303 movq 8(%rsp), %rdx
21304 movq (%rsp), %rax
21305 addq $24, %rsp
21306+ pax_force_fptr %rdi
21307 jmp *%rdi
21308+ENDPROC(return_to_handler)
21309 #endif
21310
21311
21312@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
21313 ENDPROC(native_usergs_sysret64)
21314 #endif /* CONFIG_PARAVIRT */
21315
21316+ .macro ljmpq sel, off
21317+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
21318+ .byte 0x48; ljmp *1234f(%rip)
21319+ .pushsection .rodata
21320+ .align 16
21321+ 1234: .quad \off; .word \sel
21322+ .popsection
21323+#else
21324+ pushq $\sel
21325+ pushq $\off
21326+ lretq
21327+#endif
21328+ .endm
21329+
21330+ .macro pax_enter_kernel
21331+ pax_set_fptr_mask
21332+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21333+ call pax_enter_kernel
21334+#endif
21335+ .endm
21336+
21337+ .macro pax_exit_kernel
21338+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21339+ call pax_exit_kernel
21340+#endif
21341+
21342+ .endm
21343+
21344+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21345+ENTRY(pax_enter_kernel)
21346+ pushq %rdi
21347+
21348+#ifdef CONFIG_PARAVIRT
21349+ PV_SAVE_REGS(CLBR_RDI)
21350+#endif
21351+
21352+#ifdef CONFIG_PAX_KERNEXEC
21353+ GET_CR0_INTO_RDI
21354+ bts $16,%rdi
21355+ jnc 3f
21356+ mov %cs,%edi
21357+ cmp $__KERNEL_CS,%edi
21358+ jnz 2f
21359+1:
21360+#endif
21361+
21362+#ifdef CONFIG_PAX_MEMORY_UDEREF
21363+ 661: jmp 111f
21364+ .pushsection .altinstr_replacement, "a"
21365+ 662: ASM_NOP2
21366+ .popsection
21367+ .pushsection .altinstructions, "a"
21368+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21369+ .popsection
21370+ GET_CR3_INTO_RDI
21371+ cmp $0,%dil
21372+ jnz 112f
21373+ mov $__KERNEL_DS,%edi
21374+ mov %edi,%ss
21375+ jmp 111f
21376+112: cmp $1,%dil
21377+ jz 113f
21378+ ud2
21379+113: sub $4097,%rdi
21380+ bts $63,%rdi
21381+ SET_RDI_INTO_CR3
21382+ mov $__UDEREF_KERNEL_DS,%edi
21383+ mov %edi,%ss
21384+111:
21385+#endif
21386+
21387+#ifdef CONFIG_PARAVIRT
21388+ PV_RESTORE_REGS(CLBR_RDI)
21389+#endif
21390+
21391+ popq %rdi
21392+ pax_force_retaddr
21393+ retq
21394+
21395+#ifdef CONFIG_PAX_KERNEXEC
21396+2: ljmpq __KERNEL_CS,1b
21397+3: ljmpq __KERNEXEC_KERNEL_CS,4f
21398+4: SET_RDI_INTO_CR0
21399+ jmp 1b
21400+#endif
21401+ENDPROC(pax_enter_kernel)
21402+
21403+ENTRY(pax_exit_kernel)
21404+ pushq %rdi
21405+
21406+#ifdef CONFIG_PARAVIRT
21407+ PV_SAVE_REGS(CLBR_RDI)
21408+#endif
21409+
21410+#ifdef CONFIG_PAX_KERNEXEC
21411+ mov %cs,%rdi
21412+ cmp $__KERNEXEC_KERNEL_CS,%edi
21413+ jz 2f
21414+ GET_CR0_INTO_RDI
21415+ bts $16,%rdi
21416+ jnc 4f
21417+1:
21418+#endif
21419+
21420+#ifdef CONFIG_PAX_MEMORY_UDEREF
21421+ 661: jmp 111f
21422+ .pushsection .altinstr_replacement, "a"
21423+ 662: ASM_NOP2
21424+ .popsection
21425+ .pushsection .altinstructions, "a"
21426+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21427+ .popsection
21428+ mov %ss,%edi
21429+ cmp $__UDEREF_KERNEL_DS,%edi
21430+ jnz 111f
21431+ GET_CR3_INTO_RDI
21432+ cmp $0,%dil
21433+ jz 112f
21434+ ud2
21435+112: add $4097,%rdi
21436+ bts $63,%rdi
21437+ SET_RDI_INTO_CR3
21438+ mov $__KERNEL_DS,%edi
21439+ mov %edi,%ss
21440+111:
21441+#endif
21442+
21443+#ifdef CONFIG_PARAVIRT
21444+ PV_RESTORE_REGS(CLBR_RDI);
21445+#endif
21446+
21447+ popq %rdi
21448+ pax_force_retaddr
21449+ retq
21450+
21451+#ifdef CONFIG_PAX_KERNEXEC
21452+2: GET_CR0_INTO_RDI
21453+ btr $16,%rdi
21454+ jnc 4f
21455+ ljmpq __KERNEL_CS,3f
21456+3: SET_RDI_INTO_CR0
21457+ jmp 1b
21458+4: ud2
21459+ jmp 4b
21460+#endif
21461+ENDPROC(pax_exit_kernel)
21462+#endif
21463+
21464+ .macro pax_enter_kernel_user
21465+ pax_set_fptr_mask
21466+#ifdef CONFIG_PAX_MEMORY_UDEREF
21467+ call pax_enter_kernel_user
21468+#endif
21469+ .endm
21470+
21471+ .macro pax_exit_kernel_user
21472+#ifdef CONFIG_PAX_MEMORY_UDEREF
21473+ call pax_exit_kernel_user
21474+#endif
21475+#ifdef CONFIG_PAX_RANDKSTACK
21476+ pushq %rax
21477+ pushq %r11
21478+ call pax_randomize_kstack
21479+ popq %r11
21480+ popq %rax
21481+#endif
21482+ .endm
21483+
21484+#ifdef CONFIG_PAX_MEMORY_UDEREF
21485+ENTRY(pax_enter_kernel_user)
21486+ pushq %rdi
21487+ pushq %rbx
21488+
21489+#ifdef CONFIG_PARAVIRT
21490+ PV_SAVE_REGS(CLBR_RDI)
21491+#endif
21492+
21493+ 661: jmp 111f
21494+ .pushsection .altinstr_replacement, "a"
21495+ 662: ASM_NOP2
21496+ .popsection
21497+ .pushsection .altinstructions, "a"
21498+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21499+ .popsection
21500+ GET_CR3_INTO_RDI
21501+ cmp $1,%dil
21502+ jnz 4f
21503+ sub $4097,%rdi
21504+ bts $63,%rdi
21505+ SET_RDI_INTO_CR3
21506+ jmp 3f
21507+111:
21508+
21509+ GET_CR3_INTO_RDI
21510+ mov %rdi,%rbx
21511+ add $__START_KERNEL_map,%rbx
21512+ sub phys_base(%rip),%rbx
21513+
21514+#ifdef CONFIG_PARAVIRT
21515+ cmpl $0, pv_info+PARAVIRT_enabled
21516+ jz 1f
21517+ pushq %rdi
21518+ i = 0
21519+ .rept USER_PGD_PTRS
21520+ mov i*8(%rbx),%rsi
21521+ mov $0,%sil
21522+ lea i*8(%rbx),%rdi
21523+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
21524+ i = i + 1
21525+ .endr
21526+ popq %rdi
21527+ jmp 2f
21528+1:
21529+#endif
21530+
21531+ i = 0
21532+ .rept USER_PGD_PTRS
21533+ movb $0,i*8(%rbx)
21534+ i = i + 1
21535+ .endr
21536+
21537+2: SET_RDI_INTO_CR3
21538+
21539+#ifdef CONFIG_PAX_KERNEXEC
21540+ GET_CR0_INTO_RDI
21541+ bts $16,%rdi
21542+ SET_RDI_INTO_CR0
21543+#endif
21544+
21545+3:
21546+
21547+#ifdef CONFIG_PARAVIRT
21548+ PV_RESTORE_REGS(CLBR_RDI)
21549+#endif
21550+
21551+ popq %rbx
21552+ popq %rdi
21553+ pax_force_retaddr
21554+ retq
21555+4: ud2
21556+ENDPROC(pax_enter_kernel_user)
21557+
21558+ENTRY(pax_exit_kernel_user)
21559+ pushq %rdi
21560+ pushq %rbx
21561+
21562+#ifdef CONFIG_PARAVIRT
21563+ PV_SAVE_REGS(CLBR_RDI)
21564+#endif
21565+
21566+ GET_CR3_INTO_RDI
21567+ 661: jmp 1f
21568+ .pushsection .altinstr_replacement, "a"
21569+ 662: ASM_NOP2
21570+ .popsection
21571+ .pushsection .altinstructions, "a"
21572+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21573+ .popsection
21574+ cmp $0,%dil
21575+ jnz 3f
21576+ add $4097,%rdi
21577+ bts $63,%rdi
21578+ SET_RDI_INTO_CR3
21579+ jmp 2f
21580+1:
21581+
21582+ mov %rdi,%rbx
21583+
21584+#ifdef CONFIG_PAX_KERNEXEC
21585+ GET_CR0_INTO_RDI
21586+ btr $16,%rdi
21587+ jnc 3f
21588+ SET_RDI_INTO_CR0
21589+#endif
21590+
21591+ add $__START_KERNEL_map,%rbx
21592+ sub phys_base(%rip),%rbx
21593+
21594+#ifdef CONFIG_PARAVIRT
21595+ cmpl $0, pv_info+PARAVIRT_enabled
21596+ jz 1f
21597+ i = 0
21598+ .rept USER_PGD_PTRS
21599+ mov i*8(%rbx),%rsi
21600+ mov $0x67,%sil
21601+ lea i*8(%rbx),%rdi
21602+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
21603+ i = i + 1
21604+ .endr
21605+ jmp 2f
21606+1:
21607+#endif
21608+
21609+ i = 0
21610+ .rept USER_PGD_PTRS
21611+ movb $0x67,i*8(%rbx)
21612+ i = i + 1
21613+ .endr
21614+2:
21615+
21616+#ifdef CONFIG_PARAVIRT
21617+ PV_RESTORE_REGS(CLBR_RDI)
21618+#endif
21619+
21620+ popq %rbx
21621+ popq %rdi
21622+ pax_force_retaddr
21623+ retq
21624+3: ud2
21625+ENDPROC(pax_exit_kernel_user)
21626+#endif
21627+
21628+ .macro pax_enter_kernel_nmi
21629+ pax_set_fptr_mask
21630+
21631+#ifdef CONFIG_PAX_KERNEXEC
21632+ GET_CR0_INTO_RDI
21633+ bts $16,%rdi
21634+ jc 110f
21635+ SET_RDI_INTO_CR0
21636+ or $2,%ebx
21637+110:
21638+#endif
21639+
21640+#ifdef CONFIG_PAX_MEMORY_UDEREF
21641+ 661: jmp 111f
21642+ .pushsection .altinstr_replacement, "a"
21643+ 662: ASM_NOP2
21644+ .popsection
21645+ .pushsection .altinstructions, "a"
21646+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21647+ .popsection
21648+ GET_CR3_INTO_RDI
21649+ cmp $0,%dil
21650+ jz 111f
21651+ sub $4097,%rdi
21652+ or $4,%ebx
21653+ bts $63,%rdi
21654+ SET_RDI_INTO_CR3
21655+ mov $__UDEREF_KERNEL_DS,%edi
21656+ mov %edi,%ss
21657+111:
21658+#endif
21659+ .endm
21660+
21661+ .macro pax_exit_kernel_nmi
21662+#ifdef CONFIG_PAX_KERNEXEC
21663+ btr $1,%ebx
21664+ jnc 110f
21665+ GET_CR0_INTO_RDI
21666+ btr $16,%rdi
21667+ SET_RDI_INTO_CR0
21668+110:
21669+#endif
21670+
21671+#ifdef CONFIG_PAX_MEMORY_UDEREF
21672+ btr $2,%ebx
21673+ jnc 111f
21674+ GET_CR3_INTO_RDI
21675+ add $4097,%rdi
21676+ bts $63,%rdi
21677+ SET_RDI_INTO_CR3
21678+ mov $__KERNEL_DS,%edi
21679+ mov %edi,%ss
21680+111:
21681+#endif
21682+ .endm
21683+
21684+ .macro pax_erase_kstack
21685+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21686+ call pax_erase_kstack
21687+#endif
21688+ .endm
21689+
21690+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21691+ENTRY(pax_erase_kstack)
21692+ pushq %rdi
21693+ pushq %rcx
21694+ pushq %rax
21695+ pushq %r11
21696+
21697+ GET_THREAD_INFO(%r11)
21698+ mov TI_lowest_stack(%r11), %rdi
21699+ mov $-0xBEEF, %rax
21700+ std
21701+
21702+1: mov %edi, %ecx
21703+ and $THREAD_SIZE_asm - 1, %ecx
21704+ shr $3, %ecx
21705+ repne scasq
21706+ jecxz 2f
21707+
21708+ cmp $2*8, %ecx
21709+ jc 2f
21710+
21711+ mov $2*8, %ecx
21712+ repe scasq
21713+ jecxz 2f
21714+ jne 1b
21715+
21716+2: cld
21717+ mov %esp, %ecx
21718+ sub %edi, %ecx
21719+
21720+ cmp $THREAD_SIZE_asm, %rcx
21721+ jb 3f
21722+ ud2
21723+3:
21724+
21725+ shr $3, %ecx
21726+ rep stosq
21727+
21728+ mov TI_task_thread_sp0(%r11), %rdi
21729+ sub $256, %rdi
21730+ mov %rdi, TI_lowest_stack(%r11)
21731+
21732+ popq %r11
21733+ popq %rax
21734+ popq %rcx
21735+ popq %rdi
21736+ pax_force_retaddr
21737+ ret
21738+ENDPROC(pax_erase_kstack)
21739+#endif
21740
21741 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
21742 #ifdef CONFIG_TRACE_IRQFLAGS
21743@@ -375,8 +808,8 @@ ENDPROC(native_usergs_sysret64)
21744 .endm
21745
21746 .macro UNFAKE_STACK_FRAME
21747- addq $8*6, %rsp
21748- CFI_ADJUST_CFA_OFFSET -(6*8)
21749+ addq $8*6 + ARG_SKIP, %rsp
21750+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
21751 .endm
21752
21753 /*
21754@@ -463,7 +896,7 @@ ENDPROC(native_usergs_sysret64)
21755 movq %rsp, %rsi
21756
21757 leaq -RBP(%rsp),%rdi /* arg1 for handler */
21758- testl $3, CS-RBP(%rsi)
21759+ testb $3, CS-RBP(%rsi)
21760 je 1f
21761 SWAPGS
21762 /*
21763@@ -498,9 +931,10 @@ ENTRY(save_rest)
21764 movq_cfi r15, R15+16
21765 movq %r11, 8(%rsp) /* return address */
21766 FIXUP_TOP_OF_STACK %r11, 16
21767+ pax_force_retaddr
21768 ret
21769 CFI_ENDPROC
21770-END(save_rest)
21771+ENDPROC(save_rest)
21772
21773 /* save complete stack frame */
21774 .pushsection .kprobes.text, "ax"
21775@@ -529,9 +963,10 @@ ENTRY(save_paranoid)
21776 js 1f /* negative -> in kernel */
21777 SWAPGS
21778 xorl %ebx,%ebx
21779-1: ret
21780+1: pax_force_retaddr_bts
21781+ ret
21782 CFI_ENDPROC
21783-END(save_paranoid)
21784+ENDPROC(save_paranoid)
21785 .popsection
21786
21787 /*
21788@@ -553,7 +988,7 @@ ENTRY(ret_from_fork)
21789
21790 RESTORE_REST
21791
21792- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21793+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21794 jz 1f
21795
21796 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
21797@@ -571,7 +1006,7 @@ ENTRY(ret_from_fork)
21798 RESTORE_REST
21799 jmp int_ret_from_sys_call
21800 CFI_ENDPROC
21801-END(ret_from_fork)
21802+ENDPROC(ret_from_fork)
21803
21804 /*
21805 * System call entry. Up to 6 arguments in registers are supported.
21806@@ -608,7 +1043,7 @@ END(ret_from_fork)
21807 ENTRY(system_call)
21808 CFI_STARTPROC simple
21809 CFI_SIGNAL_FRAME
21810- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
21811+ CFI_DEF_CFA rsp,0
21812 CFI_REGISTER rip,rcx
21813 /*CFI_REGISTER rflags,r11*/
21814 SWAPGS_UNSAFE_STACK
21815@@ -621,16 +1056,23 @@ GLOBAL(system_call_after_swapgs)
21816
21817 movq %rsp,PER_CPU_VAR(old_rsp)
21818 movq PER_CPU_VAR(kernel_stack),%rsp
21819+ SAVE_ARGS 8*6,0
21820+ pax_enter_kernel_user
21821+
21822+#ifdef CONFIG_PAX_RANDKSTACK
21823+ pax_erase_kstack
21824+#endif
21825+
21826 /*
21827 * No need to follow this irqs off/on section - it's straight
21828 * and short:
21829 */
21830 ENABLE_INTERRUPTS(CLBR_NONE)
21831- SAVE_ARGS 8,0
21832 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
21833 movq %rcx,RIP-ARGOFFSET(%rsp)
21834 CFI_REL_OFFSET rip,RIP-ARGOFFSET
21835- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21836+ GET_THREAD_INFO(%rcx)
21837+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
21838 jnz tracesys
21839 system_call_fastpath:
21840 #if __SYSCALL_MASK == ~0
21841@@ -640,7 +1082,7 @@ system_call_fastpath:
21842 cmpl $__NR_syscall_max,%eax
21843 #endif
21844 ja badsys
21845- movq %r10,%rcx
21846+ movq R10-ARGOFFSET(%rsp),%rcx
21847 call *sys_call_table(,%rax,8) # XXX: rip relative
21848 movq %rax,RAX-ARGOFFSET(%rsp)
21849 /*
21850@@ -654,10 +1096,13 @@ sysret_check:
21851 LOCKDEP_SYS_EXIT
21852 DISABLE_INTERRUPTS(CLBR_NONE)
21853 TRACE_IRQS_OFF
21854- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
21855+ GET_THREAD_INFO(%rcx)
21856+ movl TI_flags(%rcx),%edx
21857 andl %edi,%edx
21858 jnz sysret_careful
21859 CFI_REMEMBER_STATE
21860+ pax_exit_kernel_user
21861+ pax_erase_kstack
21862 /*
21863 * sysretq will re-enable interrupts:
21864 */
21865@@ -709,14 +1154,18 @@ badsys:
21866 * jump back to the normal fast path.
21867 */
21868 auditsys:
21869- movq %r10,%r9 /* 6th arg: 4th syscall arg */
21870+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
21871 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
21872 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
21873 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
21874 movq %rax,%rsi /* 2nd arg: syscall number */
21875 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
21876 call __audit_syscall_entry
21877+
21878+ pax_erase_kstack
21879+
21880 LOAD_ARGS 0 /* reload call-clobbered registers */
21881+ pax_set_fptr_mask
21882 jmp system_call_fastpath
21883
21884 /*
21885@@ -737,7 +1186,7 @@ sysret_audit:
21886 /* Do syscall tracing */
21887 tracesys:
21888 #ifdef CONFIG_AUDITSYSCALL
21889- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21890+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
21891 jz auditsys
21892 #endif
21893 SAVE_REST
21894@@ -745,12 +1194,16 @@ tracesys:
21895 FIXUP_TOP_OF_STACK %rdi
21896 movq %rsp,%rdi
21897 call syscall_trace_enter
21898+
21899+ pax_erase_kstack
21900+
21901 /*
21902 * Reload arg registers from stack in case ptrace changed them.
21903 * We don't reload %rax because syscall_trace_enter() returned
21904 * the value it wants us to use in the table lookup.
21905 */
21906 LOAD_ARGS ARGOFFSET, 1
21907+ pax_set_fptr_mask
21908 RESTORE_REST
21909 #if __SYSCALL_MASK == ~0
21910 cmpq $__NR_syscall_max,%rax
21911@@ -759,7 +1212,7 @@ tracesys:
21912 cmpl $__NR_syscall_max,%eax
21913 #endif
21914 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
21915- movq %r10,%rcx /* fixup for C */
21916+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
21917 call *sys_call_table(,%rax,8)
21918 movq %rax,RAX-ARGOFFSET(%rsp)
21919 /* Use IRET because user could have changed frame */
21920@@ -780,7 +1233,9 @@ GLOBAL(int_with_check)
21921 andl %edi,%edx
21922 jnz int_careful
21923 andl $~TS_COMPAT,TI_status(%rcx)
21924- jmp retint_swapgs
21925+ pax_exit_kernel_user
21926+ pax_erase_kstack
21927+ jmp retint_swapgs_pax
21928
21929 /* Either reschedule or signal or syscall exit tracking needed. */
21930 /* First do a reschedule test. */
21931@@ -826,7 +1281,7 @@ int_restore_rest:
21932 TRACE_IRQS_OFF
21933 jmp int_with_check
21934 CFI_ENDPROC
21935-END(system_call)
21936+ENDPROC(system_call)
21937
21938 .macro FORK_LIKE func
21939 ENTRY(stub_\func)
21940@@ -839,9 +1294,10 @@ ENTRY(stub_\func)
21941 DEFAULT_FRAME 0 8 /* offset 8: return address */
21942 call sys_\func
21943 RESTORE_TOP_OF_STACK %r11, 8
21944+ pax_force_retaddr
21945 ret $REST_SKIP /* pop extended registers */
21946 CFI_ENDPROC
21947-END(stub_\func)
21948+ENDPROC(stub_\func)
21949 .endm
21950
21951 .macro FIXED_FRAME label,func
21952@@ -851,9 +1307,10 @@ ENTRY(\label)
21953 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
21954 call \func
21955 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
21956+ pax_force_retaddr
21957 ret
21958 CFI_ENDPROC
21959-END(\label)
21960+ENDPROC(\label)
21961 .endm
21962
21963 FORK_LIKE clone
21964@@ -870,9 +1327,10 @@ ENTRY(ptregscall_common)
21965 movq_cfi_restore R12+8, r12
21966 movq_cfi_restore RBP+8, rbp
21967 movq_cfi_restore RBX+8, rbx
21968+ pax_force_retaddr
21969 ret $REST_SKIP /* pop extended registers */
21970 CFI_ENDPROC
21971-END(ptregscall_common)
21972+ENDPROC(ptregscall_common)
21973
21974 ENTRY(stub_execve)
21975 CFI_STARTPROC
21976@@ -885,7 +1343,7 @@ ENTRY(stub_execve)
21977 RESTORE_REST
21978 jmp int_ret_from_sys_call
21979 CFI_ENDPROC
21980-END(stub_execve)
21981+ENDPROC(stub_execve)
21982
21983 /*
21984 * sigreturn is special because it needs to restore all registers on return.
21985@@ -902,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
21986 RESTORE_REST
21987 jmp int_ret_from_sys_call
21988 CFI_ENDPROC
21989-END(stub_rt_sigreturn)
21990+ENDPROC(stub_rt_sigreturn)
21991
21992 #ifdef CONFIG_X86_X32_ABI
21993 ENTRY(stub_x32_rt_sigreturn)
21994@@ -916,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
21995 RESTORE_REST
21996 jmp int_ret_from_sys_call
21997 CFI_ENDPROC
21998-END(stub_x32_rt_sigreturn)
21999+ENDPROC(stub_x32_rt_sigreturn)
22000
22001 ENTRY(stub_x32_execve)
22002 CFI_STARTPROC
22003@@ -930,7 +1388,7 @@ ENTRY(stub_x32_execve)
22004 RESTORE_REST
22005 jmp int_ret_from_sys_call
22006 CFI_ENDPROC
22007-END(stub_x32_execve)
22008+ENDPROC(stub_x32_execve)
22009
22010 #endif
22011
22012@@ -967,7 +1425,7 @@ vector=vector+1
22013 2: jmp common_interrupt
22014 .endr
22015 CFI_ENDPROC
22016-END(irq_entries_start)
22017+ENDPROC(irq_entries_start)
22018
22019 .previous
22020 END(interrupt)
22021@@ -987,6 +1445,16 @@ END(interrupt)
22022 subq $ORIG_RAX-RBP, %rsp
22023 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
22024 SAVE_ARGS_IRQ
22025+#ifdef CONFIG_PAX_MEMORY_UDEREF
22026+ testb $3, CS(%rdi)
22027+ jnz 1f
22028+ pax_enter_kernel
22029+ jmp 2f
22030+1: pax_enter_kernel_user
22031+2:
22032+#else
22033+ pax_enter_kernel
22034+#endif
22035 call \func
22036 .endm
22037
22038@@ -1019,7 +1487,7 @@ ret_from_intr:
22039
22040 exit_intr:
22041 GET_THREAD_INFO(%rcx)
22042- testl $3,CS-ARGOFFSET(%rsp)
22043+ testb $3,CS-ARGOFFSET(%rsp)
22044 je retint_kernel
22045
22046 /* Interrupt came from user space */
22047@@ -1041,12 +1509,16 @@ retint_swapgs: /* return to user-space */
22048 * The iretq could re-enable interrupts:
22049 */
22050 DISABLE_INTERRUPTS(CLBR_ANY)
22051+ pax_exit_kernel_user
22052+retint_swapgs_pax:
22053 TRACE_IRQS_IRETQ
22054 SWAPGS
22055 jmp restore_args
22056
22057 retint_restore_args: /* return to kernel space */
22058 DISABLE_INTERRUPTS(CLBR_ANY)
22059+ pax_exit_kernel
22060+ pax_force_retaddr (RIP-ARGOFFSET)
22061 /*
22062 * The iretq could re-enable interrupts:
22063 */
22064@@ -1129,7 +1601,7 @@ ENTRY(retint_kernel)
22065 #endif
22066
22067 CFI_ENDPROC
22068-END(common_interrupt)
22069+ENDPROC(common_interrupt)
22070 /*
22071 * End of kprobes section
22072 */
22073@@ -1147,7 +1619,7 @@ ENTRY(\sym)
22074 interrupt \do_sym
22075 jmp ret_from_intr
22076 CFI_ENDPROC
22077-END(\sym)
22078+ENDPROC(\sym)
22079 .endm
22080
22081 #ifdef CONFIG_SMP
22082@@ -1208,12 +1680,22 @@ ENTRY(\sym)
22083 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22084 call error_entry
22085 DEFAULT_FRAME 0
22086+#ifdef CONFIG_PAX_MEMORY_UDEREF
22087+ testb $3, CS(%rsp)
22088+ jnz 1f
22089+ pax_enter_kernel
22090+ jmp 2f
22091+1: pax_enter_kernel_user
22092+2:
22093+#else
22094+ pax_enter_kernel
22095+#endif
22096 movq %rsp,%rdi /* pt_regs pointer */
22097 xorl %esi,%esi /* no error code */
22098 call \do_sym
22099 jmp error_exit /* %ebx: no swapgs flag */
22100 CFI_ENDPROC
22101-END(\sym)
22102+ENDPROC(\sym)
22103 .endm
22104
22105 .macro paranoidzeroentry sym do_sym
22106@@ -1226,15 +1708,25 @@ ENTRY(\sym)
22107 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22108 call save_paranoid
22109 TRACE_IRQS_OFF
22110+#ifdef CONFIG_PAX_MEMORY_UDEREF
22111+ testb $3, CS(%rsp)
22112+ jnz 1f
22113+ pax_enter_kernel
22114+ jmp 2f
22115+1: pax_enter_kernel_user
22116+2:
22117+#else
22118+ pax_enter_kernel
22119+#endif
22120 movq %rsp,%rdi /* pt_regs pointer */
22121 xorl %esi,%esi /* no error code */
22122 call \do_sym
22123 jmp paranoid_exit /* %ebx: no swapgs flag */
22124 CFI_ENDPROC
22125-END(\sym)
22126+ENDPROC(\sym)
22127 .endm
22128
22129-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
22130+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
22131 .macro paranoidzeroentry_ist sym do_sym ist
22132 ENTRY(\sym)
22133 INTR_FRAME
22134@@ -1245,14 +1737,30 @@ ENTRY(\sym)
22135 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22136 call save_paranoid
22137 TRACE_IRQS_OFF_DEBUG
22138+#ifdef CONFIG_PAX_MEMORY_UDEREF
22139+ testb $3, CS(%rsp)
22140+ jnz 1f
22141+ pax_enter_kernel
22142+ jmp 2f
22143+1: pax_enter_kernel_user
22144+2:
22145+#else
22146+ pax_enter_kernel
22147+#endif
22148 movq %rsp,%rdi /* pt_regs pointer */
22149 xorl %esi,%esi /* no error code */
22150+#ifdef CONFIG_SMP
22151+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
22152+ lea init_tss(%r12), %r12
22153+#else
22154+ lea init_tss(%rip), %r12
22155+#endif
22156 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
22157 call \do_sym
22158 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
22159 jmp paranoid_exit /* %ebx: no swapgs flag */
22160 CFI_ENDPROC
22161-END(\sym)
22162+ENDPROC(\sym)
22163 .endm
22164
22165 .macro errorentry sym do_sym
22166@@ -1264,13 +1772,23 @@ ENTRY(\sym)
22167 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22168 call error_entry
22169 DEFAULT_FRAME 0
22170+#ifdef CONFIG_PAX_MEMORY_UDEREF
22171+ testb $3, CS(%rsp)
22172+ jnz 1f
22173+ pax_enter_kernel
22174+ jmp 2f
22175+1: pax_enter_kernel_user
22176+2:
22177+#else
22178+ pax_enter_kernel
22179+#endif
22180 movq %rsp,%rdi /* pt_regs pointer */
22181 movq ORIG_RAX(%rsp),%rsi /* get error code */
22182 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
22183 call \do_sym
22184 jmp error_exit /* %ebx: no swapgs flag */
22185 CFI_ENDPROC
22186-END(\sym)
22187+ENDPROC(\sym)
22188 .endm
22189
22190 /* error code is on the stack already */
22191@@ -1284,13 +1802,23 @@ ENTRY(\sym)
22192 call save_paranoid
22193 DEFAULT_FRAME 0
22194 TRACE_IRQS_OFF
22195+#ifdef CONFIG_PAX_MEMORY_UDEREF
22196+ testb $3, CS(%rsp)
22197+ jnz 1f
22198+ pax_enter_kernel
22199+ jmp 2f
22200+1: pax_enter_kernel_user
22201+2:
22202+#else
22203+ pax_enter_kernel
22204+#endif
22205 movq %rsp,%rdi /* pt_regs pointer */
22206 movq ORIG_RAX(%rsp),%rsi /* get error code */
22207 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
22208 call \do_sym
22209 jmp paranoid_exit /* %ebx: no swapgs flag */
22210 CFI_ENDPROC
22211-END(\sym)
22212+ENDPROC(\sym)
22213 .endm
22214
22215 zeroentry divide_error do_divide_error
22216@@ -1320,9 +1848,10 @@ gs_change:
22217 2: mfence /* workaround */
22218 SWAPGS
22219 popfq_cfi
22220+ pax_force_retaddr
22221 ret
22222 CFI_ENDPROC
22223-END(native_load_gs_index)
22224+ENDPROC(native_load_gs_index)
22225
22226 _ASM_EXTABLE(gs_change,bad_gs)
22227 .section .fixup,"ax"
22228@@ -1350,9 +1879,10 @@ ENTRY(call_softirq)
22229 CFI_DEF_CFA_REGISTER rsp
22230 CFI_ADJUST_CFA_OFFSET -8
22231 decl PER_CPU_VAR(irq_count)
22232+ pax_force_retaddr
22233 ret
22234 CFI_ENDPROC
22235-END(call_softirq)
22236+ENDPROC(call_softirq)
22237
22238 #ifdef CONFIG_XEN
22239 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
22240@@ -1390,7 +1920,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
22241 decl PER_CPU_VAR(irq_count)
22242 jmp error_exit
22243 CFI_ENDPROC
22244-END(xen_do_hypervisor_callback)
22245+ENDPROC(xen_do_hypervisor_callback)
22246
22247 /*
22248 * Hypervisor uses this for application faults while it executes.
22249@@ -1449,7 +1979,7 @@ ENTRY(xen_failsafe_callback)
22250 SAVE_ALL
22251 jmp error_exit
22252 CFI_ENDPROC
22253-END(xen_failsafe_callback)
22254+ENDPROC(xen_failsafe_callback)
22255
22256 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
22257 xen_hvm_callback_vector xen_evtchn_do_upcall
22258@@ -1501,18 +2031,33 @@ ENTRY(paranoid_exit)
22259 DEFAULT_FRAME
22260 DISABLE_INTERRUPTS(CLBR_NONE)
22261 TRACE_IRQS_OFF_DEBUG
22262- testl %ebx,%ebx /* swapgs needed? */
22263+ testl $1,%ebx /* swapgs needed? */
22264 jnz paranoid_restore
22265- testl $3,CS(%rsp)
22266+ testb $3,CS(%rsp)
22267 jnz paranoid_userspace
22268+#ifdef CONFIG_PAX_MEMORY_UDEREF
22269+ pax_exit_kernel
22270+ TRACE_IRQS_IRETQ 0
22271+ SWAPGS_UNSAFE_STACK
22272+ RESTORE_ALL 8
22273+ pax_force_retaddr_bts
22274+ jmp irq_return
22275+#endif
22276 paranoid_swapgs:
22277+#ifdef CONFIG_PAX_MEMORY_UDEREF
22278+ pax_exit_kernel_user
22279+#else
22280+ pax_exit_kernel
22281+#endif
22282 TRACE_IRQS_IRETQ 0
22283 SWAPGS_UNSAFE_STACK
22284 RESTORE_ALL 8
22285 jmp irq_return
22286 paranoid_restore:
22287+ pax_exit_kernel
22288 TRACE_IRQS_IRETQ_DEBUG 0
22289 RESTORE_ALL 8
22290+ pax_force_retaddr_bts
22291 jmp irq_return
22292 paranoid_userspace:
22293 GET_THREAD_INFO(%rcx)
22294@@ -1541,7 +2086,7 @@ paranoid_schedule:
22295 TRACE_IRQS_OFF
22296 jmp paranoid_userspace
22297 CFI_ENDPROC
22298-END(paranoid_exit)
22299+ENDPROC(paranoid_exit)
22300
22301 /*
22302 * Exception entry point. This expects an error code/orig_rax on the stack.
22303@@ -1568,12 +2113,13 @@ ENTRY(error_entry)
22304 movq_cfi r14, R14+8
22305 movq_cfi r15, R15+8
22306 xorl %ebx,%ebx
22307- testl $3,CS+8(%rsp)
22308+ testb $3,CS+8(%rsp)
22309 je error_kernelspace
22310 error_swapgs:
22311 SWAPGS
22312 error_sti:
22313 TRACE_IRQS_OFF
22314+ pax_force_retaddr_bts
22315 ret
22316
22317 /*
22318@@ -1600,7 +2146,7 @@ bstep_iret:
22319 movq %rcx,RIP+8(%rsp)
22320 jmp error_swapgs
22321 CFI_ENDPROC
22322-END(error_entry)
22323+ENDPROC(error_entry)
22324
22325
22326 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
22327@@ -1611,7 +2157,7 @@ ENTRY(error_exit)
22328 DISABLE_INTERRUPTS(CLBR_NONE)
22329 TRACE_IRQS_OFF
22330 GET_THREAD_INFO(%rcx)
22331- testl %eax,%eax
22332+ testl $1,%eax
22333 jne retint_kernel
22334 LOCKDEP_SYS_EXIT_IRQ
22335 movl TI_flags(%rcx),%edx
22336@@ -1620,7 +2166,7 @@ ENTRY(error_exit)
22337 jnz retint_careful
22338 jmp retint_swapgs
22339 CFI_ENDPROC
22340-END(error_exit)
22341+ENDPROC(error_exit)
22342
22343 /*
22344 * Test if a given stack is an NMI stack or not.
22345@@ -1678,9 +2224,11 @@ ENTRY(nmi)
22346 * If %cs was not the kernel segment, then the NMI triggered in user
22347 * space, which means it is definitely not nested.
22348 */
22349+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
22350+ je 1f
22351 cmpl $__KERNEL_CS, 16(%rsp)
22352 jne first_nmi
22353-
22354+1:
22355 /*
22356 * Check the special variable on the stack to see if NMIs are
22357 * executing.
22358@@ -1714,8 +2262,7 @@ nested_nmi:
22359
22360 1:
22361 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
22362- leaq -1*8(%rsp), %rdx
22363- movq %rdx, %rsp
22364+ subq $8, %rsp
22365 CFI_ADJUST_CFA_OFFSET 1*8
22366 leaq -10*8(%rsp), %rdx
22367 pushq_cfi $__KERNEL_DS
22368@@ -1733,6 +2280,7 @@ nested_nmi_out:
22369 CFI_RESTORE rdx
22370
22371 /* No need to check faults here */
22372+# pax_force_retaddr_bts
22373 INTERRUPT_RETURN
22374
22375 CFI_RESTORE_STATE
22376@@ -1849,6 +2397,8 @@ end_repeat_nmi:
22377 */
22378 movq %cr2, %r12
22379
22380+ pax_enter_kernel_nmi
22381+
22382 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
22383 movq %rsp,%rdi
22384 movq $-1,%rsi
22385@@ -1861,26 +2411,31 @@ end_repeat_nmi:
22386 movq %r12, %cr2
22387 1:
22388
22389- testl %ebx,%ebx /* swapgs needed? */
22390+ testl $1,%ebx /* swapgs needed? */
22391 jnz nmi_restore
22392 nmi_swapgs:
22393 SWAPGS_UNSAFE_STACK
22394 nmi_restore:
22395+ pax_exit_kernel_nmi
22396 /* Pop the extra iret frame at once */
22397 RESTORE_ALL 6*8
22398+ testb $3, 8(%rsp)
22399+ jnz 1f
22400+ pax_force_retaddr_bts
22401+1:
22402
22403 /* Clear the NMI executing stack variable */
22404 movq $0, 5*8(%rsp)
22405 jmp irq_return
22406 CFI_ENDPROC
22407-END(nmi)
22408+ENDPROC(nmi)
22409
22410 ENTRY(ignore_sysret)
22411 CFI_STARTPROC
22412 mov $-ENOSYS,%eax
22413 sysret
22414 CFI_ENDPROC
22415-END(ignore_sysret)
22416+ENDPROC(ignore_sysret)
22417
22418 /*
22419 * End of kprobes section
22420diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
22421index 42a392a..fbbd930 100644
22422--- a/arch/x86/kernel/ftrace.c
22423+++ b/arch/x86/kernel/ftrace.c
22424@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
22425 {
22426 unsigned char replaced[MCOUNT_INSN_SIZE];
22427
22428+ ip = ktla_ktva(ip);
22429+
22430 /*
22431 * Note: Due to modules and __init, code can
22432 * disappear and change, we need to protect against faulting
22433@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
22434 unsigned char old[MCOUNT_INSN_SIZE], *new;
22435 int ret;
22436
22437- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
22438+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
22439 new = ftrace_call_replace(ip, (unsigned long)func);
22440
22441 /* See comment above by declaration of modifying_ftrace_code */
22442@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
22443 /* Also update the regs callback function */
22444 if (!ret) {
22445 ip = (unsigned long)(&ftrace_regs_call);
22446- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
22447+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
22448 new = ftrace_call_replace(ip, (unsigned long)func);
22449 ret = ftrace_modify_code(ip, old, new);
22450 }
22451@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
22452 * kernel identity mapping to modify code.
22453 */
22454 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
22455- ip = (unsigned long)__va(__pa_symbol(ip));
22456+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
22457
22458 return probe_kernel_write((void *)ip, val, size);
22459 }
22460@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
22461 unsigned char replaced[MCOUNT_INSN_SIZE];
22462 unsigned char brk = BREAKPOINT_INSTRUCTION;
22463
22464- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
22465+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
22466 return -EFAULT;
22467
22468 /* Make sure it is what we expect it to be */
22469@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
22470 return ret;
22471
22472 fail_update:
22473- probe_kernel_write((void *)ip, &old_code[0], 1);
22474+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
22475 goto out;
22476 }
22477
22478@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
22479 {
22480 unsigned char code[MCOUNT_INSN_SIZE];
22481
22482+ ip = ktla_ktva(ip);
22483+
22484 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
22485 return -EFAULT;
22486
22487diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
22488index 55b6761..a6456fc 100644
22489--- a/arch/x86/kernel/head64.c
22490+++ b/arch/x86/kernel/head64.c
22491@@ -67,12 +67,12 @@ again:
22492 pgd = *pgd_p;
22493
22494 /*
22495- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
22496- * critical -- __PAGE_OFFSET would point us back into the dynamic
22497+ * The use of __early_va rather than __va here is critical:
22498+ * __va would point us back into the dynamic
22499 * range and we might end up looping forever...
22500 */
22501 if (pgd)
22502- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
22503+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
22504 else {
22505 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
22506 reset_early_page_tables();
22507@@ -82,13 +82,13 @@ again:
22508 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
22509 for (i = 0; i < PTRS_PER_PUD; i++)
22510 pud_p[i] = 0;
22511- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
22512+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
22513 }
22514 pud_p += pud_index(address);
22515 pud = *pud_p;
22516
22517 if (pud)
22518- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
22519+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
22520 else {
22521 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
22522 reset_early_page_tables();
22523@@ -98,7 +98,7 @@ again:
22524 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
22525 for (i = 0; i < PTRS_PER_PMD; i++)
22526 pmd_p[i] = 0;
22527- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
22528+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
22529 }
22530 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
22531 pmd_p[pmd_index(address)] = pmd;
22532@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
22533 if (console_loglevel == 10)
22534 early_printk("Kernel alive\n");
22535
22536- clear_page(init_level4_pgt);
22537 /* set init_level4_pgt kernel high mapping*/
22538 init_level4_pgt[511] = early_level4_pgt[511];
22539
22540diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
22541index 73afd11..0ef46f2 100644
22542--- a/arch/x86/kernel/head_32.S
22543+++ b/arch/x86/kernel/head_32.S
22544@@ -26,6 +26,12 @@
22545 /* Physical address */
22546 #define pa(X) ((X) - __PAGE_OFFSET)
22547
22548+#ifdef CONFIG_PAX_KERNEXEC
22549+#define ta(X) (X)
22550+#else
22551+#define ta(X) ((X) - __PAGE_OFFSET)
22552+#endif
22553+
22554 /*
22555 * References to members of the new_cpu_data structure.
22556 */
22557@@ -55,11 +61,7 @@
22558 * and small than max_low_pfn, otherwise will waste some page table entries
22559 */
22560
22561-#if PTRS_PER_PMD > 1
22562-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
22563-#else
22564-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
22565-#endif
22566+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
22567
22568 /* Number of possible pages in the lowmem region */
22569 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
22570@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
22571 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
22572
22573 /*
22574+ * Real beginning of normal "text" segment
22575+ */
22576+ENTRY(stext)
22577+ENTRY(_stext)
22578+
22579+/*
22580 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
22581 * %esi points to the real-mode code as a 32-bit pointer.
22582 * CS and DS must be 4 GB flat segments, but we don't depend on
22583@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
22584 * can.
22585 */
22586 __HEAD
22587+
22588+#ifdef CONFIG_PAX_KERNEXEC
22589+ jmp startup_32
22590+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
22591+.fill PAGE_SIZE-5,1,0xcc
22592+#endif
22593+
22594 ENTRY(startup_32)
22595 movl pa(stack_start),%ecx
22596
22597@@ -106,6 +121,59 @@ ENTRY(startup_32)
22598 2:
22599 leal -__PAGE_OFFSET(%ecx),%esp
22600
22601+#ifdef CONFIG_SMP
22602+ movl $pa(cpu_gdt_table),%edi
22603+ movl $__per_cpu_load,%eax
22604+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
22605+ rorl $16,%eax
22606+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
22607+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
22608+ movl $__per_cpu_end - 1,%eax
22609+ subl $__per_cpu_start,%eax
22610+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
22611+#endif
22612+
22613+#ifdef CONFIG_PAX_MEMORY_UDEREF
22614+ movl $NR_CPUS,%ecx
22615+ movl $pa(cpu_gdt_table),%edi
22616+1:
22617+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
22618+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
22619+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
22620+ addl $PAGE_SIZE_asm,%edi
22621+ loop 1b
22622+#endif
22623+
22624+#ifdef CONFIG_PAX_KERNEXEC
22625+ movl $pa(boot_gdt),%edi
22626+ movl $__LOAD_PHYSICAL_ADDR,%eax
22627+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
22628+ rorl $16,%eax
22629+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
22630+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
22631+ rorl $16,%eax
22632+
22633+ ljmp $(__BOOT_CS),$1f
22634+1:
22635+
22636+ movl $NR_CPUS,%ecx
22637+ movl $pa(cpu_gdt_table),%edi
22638+ addl $__PAGE_OFFSET,%eax
22639+1:
22640+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
22641+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
22642+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
22643+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
22644+ rorl $16,%eax
22645+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
22646+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
22647+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
22648+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
22649+ rorl $16,%eax
22650+ addl $PAGE_SIZE_asm,%edi
22651+ loop 1b
22652+#endif
22653+
22654 /*
22655 * Clear BSS first so that there are no surprises...
22656 */
22657@@ -201,8 +269,11 @@ ENTRY(startup_32)
22658 movl %eax, pa(max_pfn_mapped)
22659
22660 /* Do early initialization of the fixmap area */
22661- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
22662- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
22663+#ifdef CONFIG_COMPAT_VDSO
22664+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
22665+#else
22666+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
22667+#endif
22668 #else /* Not PAE */
22669
22670 page_pde_offset = (__PAGE_OFFSET >> 20);
22671@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
22672 movl %eax, pa(max_pfn_mapped)
22673
22674 /* Do early initialization of the fixmap area */
22675- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
22676- movl %eax,pa(initial_page_table+0xffc)
22677+#ifdef CONFIG_COMPAT_VDSO
22678+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
22679+#else
22680+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
22681+#endif
22682 #endif
22683
22684 #ifdef CONFIG_PARAVIRT
22685@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
22686 cmpl $num_subarch_entries, %eax
22687 jae bad_subarch
22688
22689- movl pa(subarch_entries)(,%eax,4), %eax
22690- subl $__PAGE_OFFSET, %eax
22691- jmp *%eax
22692+ jmp *pa(subarch_entries)(,%eax,4)
22693
22694 bad_subarch:
22695 WEAK(lguest_entry)
22696@@ -261,10 +333,10 @@ WEAK(xen_entry)
22697 __INITDATA
22698
22699 subarch_entries:
22700- .long default_entry /* normal x86/PC */
22701- .long lguest_entry /* lguest hypervisor */
22702- .long xen_entry /* Xen hypervisor */
22703- .long default_entry /* Moorestown MID */
22704+ .long ta(default_entry) /* normal x86/PC */
22705+ .long ta(lguest_entry) /* lguest hypervisor */
22706+ .long ta(xen_entry) /* Xen hypervisor */
22707+ .long ta(default_entry) /* Moorestown MID */
22708 num_subarch_entries = (. - subarch_entries) / 4
22709 .previous
22710 #else
22711@@ -355,6 +427,7 @@ default_entry:
22712 movl pa(mmu_cr4_features),%eax
22713 movl %eax,%cr4
22714
22715+#ifdef CONFIG_X86_PAE
22716 testb $X86_CR4_PAE, %al # check if PAE is enabled
22717 jz enable_paging
22718
22719@@ -383,6 +456,9 @@ default_entry:
22720 /* Make changes effective */
22721 wrmsr
22722
22723+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
22724+#endif
22725+
22726 enable_paging:
22727
22728 /*
22729@@ -451,14 +527,20 @@ is486:
22730 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
22731 movl %eax,%ss # after changing gdt.
22732
22733- movl $(__USER_DS),%eax # DS/ES contains default USER segment
22734+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
22735 movl %eax,%ds
22736 movl %eax,%es
22737
22738 movl $(__KERNEL_PERCPU), %eax
22739 movl %eax,%fs # set this cpu's percpu
22740
22741+#ifdef CONFIG_CC_STACKPROTECTOR
22742 movl $(__KERNEL_STACK_CANARY),%eax
22743+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22744+ movl $(__USER_DS),%eax
22745+#else
22746+ xorl %eax,%eax
22747+#endif
22748 movl %eax,%gs
22749
22750 xorl %eax,%eax # Clear LDT
22751@@ -534,8 +616,11 @@ setup_once:
22752 * relocation. Manually set base address in stack canary
22753 * segment descriptor.
22754 */
22755- movl $gdt_page,%eax
22756+ movl $cpu_gdt_table,%eax
22757 movl $stack_canary,%ecx
22758+#ifdef CONFIG_SMP
22759+ addl $__per_cpu_load,%ecx
22760+#endif
22761 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
22762 shrl $16, %ecx
22763 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
22764@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
22765 /* This is global to keep gas from relaxing the jumps */
22766 ENTRY(early_idt_handler)
22767 cld
22768- cmpl $2,%ss:early_recursion_flag
22769+ cmpl $1,%ss:early_recursion_flag
22770 je hlt_loop
22771 incl %ss:early_recursion_flag
22772
22773@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
22774 pushl (20+6*4)(%esp) /* trapno */
22775 pushl $fault_msg
22776 call printk
22777-#endif
22778 call dump_stack
22779+#endif
22780 hlt_loop:
22781 hlt
22782 jmp hlt_loop
22783@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
22784 /* This is the default interrupt "handler" :-) */
22785 ALIGN
22786 ignore_int:
22787- cld
22788 #ifdef CONFIG_PRINTK
22789+ cmpl $2,%ss:early_recursion_flag
22790+ je hlt_loop
22791+ incl %ss:early_recursion_flag
22792+ cld
22793 pushl %eax
22794 pushl %ecx
22795 pushl %edx
22796@@ -634,9 +722,6 @@ ignore_int:
22797 movl $(__KERNEL_DS),%eax
22798 movl %eax,%ds
22799 movl %eax,%es
22800- cmpl $2,early_recursion_flag
22801- je hlt_loop
22802- incl early_recursion_flag
22803 pushl 16(%esp)
22804 pushl 24(%esp)
22805 pushl 32(%esp)
22806@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
22807 /*
22808 * BSS section
22809 */
22810-__PAGE_ALIGNED_BSS
22811- .align PAGE_SIZE
22812 #ifdef CONFIG_X86_PAE
22813+.section .initial_pg_pmd,"a",@progbits
22814 initial_pg_pmd:
22815 .fill 1024*KPMDS,4,0
22816 #else
22817+.section .initial_page_table,"a",@progbits
22818 ENTRY(initial_page_table)
22819 .fill 1024,4,0
22820 #endif
22821+.section .initial_pg_fixmap,"a",@progbits
22822 initial_pg_fixmap:
22823 .fill 1024,4,0
22824+.section .empty_zero_page,"a",@progbits
22825 ENTRY(empty_zero_page)
22826 .fill 4096,1,0
22827+.section .swapper_pg_dir,"a",@progbits
22828 ENTRY(swapper_pg_dir)
22829+#ifdef CONFIG_X86_PAE
22830+ .fill 4,8,0
22831+#else
22832 .fill 1024,4,0
22833+#endif
22834+
22835+/*
22836+ * The IDT has to be page-aligned to simplify the Pentium
22837+ * F0 0F bug workaround.. We have a special link segment
22838+ * for this.
22839+ */
22840+.section .idt,"a",@progbits
22841+ENTRY(idt_table)
22842+ .fill 256,8,0
22843
22844 /*
22845 * This starts the data section.
22846 */
22847 #ifdef CONFIG_X86_PAE
22848-__PAGE_ALIGNED_DATA
22849- /* Page-aligned for the benefit of paravirt? */
22850- .align PAGE_SIZE
22851+.section .initial_page_table,"a",@progbits
22852 ENTRY(initial_page_table)
22853 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
22854 # if KPMDS == 3
22855@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
22856 # error "Kernel PMDs should be 1, 2 or 3"
22857 # endif
22858 .align PAGE_SIZE /* needs to be page-sized too */
22859+
22860+#ifdef CONFIG_PAX_PER_CPU_PGD
22861+ENTRY(cpu_pgd)
22862+ .rept 2*NR_CPUS
22863+ .fill 4,8,0
22864+ .endr
22865+#endif
22866+
22867 #endif
22868
22869 .data
22870 .balign 4
22871 ENTRY(stack_start)
22872- .long init_thread_union+THREAD_SIZE
22873+ .long init_thread_union+THREAD_SIZE-8
22874
22875 __INITRODATA
22876 int_msg:
22877@@ -744,7 +851,7 @@ fault_msg:
22878 * segment size, and 32-bit linear address value:
22879 */
22880
22881- .data
22882+.section .rodata,"a",@progbits
22883 .globl boot_gdt_descr
22884 .globl idt_descr
22885
22886@@ -753,7 +860,7 @@ fault_msg:
22887 .word 0 # 32 bit align gdt_desc.address
22888 boot_gdt_descr:
22889 .word __BOOT_DS+7
22890- .long boot_gdt - __PAGE_OFFSET
22891+ .long pa(boot_gdt)
22892
22893 .word 0 # 32-bit align idt_desc.address
22894 idt_descr:
22895@@ -764,7 +871,7 @@ idt_descr:
22896 .word 0 # 32 bit align gdt_desc.address
22897 ENTRY(early_gdt_descr)
22898 .word GDT_ENTRIES*8-1
22899- .long gdt_page /* Overwritten for secondary CPUs */
22900+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
22901
22902 /*
22903 * The boot_gdt must mirror the equivalent in setup.S and is
22904@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
22905 .align L1_CACHE_BYTES
22906 ENTRY(boot_gdt)
22907 .fill GDT_ENTRY_BOOT_CS,8,0
22908- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
22909- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
22910+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
22911+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
22912+
22913+ .align PAGE_SIZE_asm
22914+ENTRY(cpu_gdt_table)
22915+ .rept NR_CPUS
22916+ .quad 0x0000000000000000 /* NULL descriptor */
22917+ .quad 0x0000000000000000 /* 0x0b reserved */
22918+ .quad 0x0000000000000000 /* 0x13 reserved */
22919+ .quad 0x0000000000000000 /* 0x1b reserved */
22920+
22921+#ifdef CONFIG_PAX_KERNEXEC
22922+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
22923+#else
22924+ .quad 0x0000000000000000 /* 0x20 unused */
22925+#endif
22926+
22927+ .quad 0x0000000000000000 /* 0x28 unused */
22928+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
22929+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
22930+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
22931+ .quad 0x0000000000000000 /* 0x4b reserved */
22932+ .quad 0x0000000000000000 /* 0x53 reserved */
22933+ .quad 0x0000000000000000 /* 0x5b reserved */
22934+
22935+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
22936+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
22937+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
22938+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
22939+
22940+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
22941+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
22942+
22943+ /*
22944+ * Segments used for calling PnP BIOS have byte granularity.
22945+ * The code segments and data segments have fixed 64k limits,
22946+ * the transfer segment sizes are set at run time.
22947+ */
22948+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
22949+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
22950+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
22951+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
22952+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
22953+
22954+ /*
22955+ * The APM segments have byte granularity and their bases
22956+ * are set at run time. All have 64k limits.
22957+ */
22958+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
22959+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
22960+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
22961+
22962+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
22963+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
22964+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
22965+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
22966+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
22967+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
22968+
22969+ /* Be sure this is zeroed to avoid false validations in Xen */
22970+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
22971+ .endr
22972diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
22973index a836860..1b5c665 100644
22974--- a/arch/x86/kernel/head_64.S
22975+++ b/arch/x86/kernel/head_64.S
22976@@ -20,6 +20,8 @@
22977 #include <asm/processor-flags.h>
22978 #include <asm/percpu.h>
22979 #include <asm/nops.h>
22980+#include <asm/cpufeature.h>
22981+#include <asm/alternative-asm.h>
22982
22983 #ifdef CONFIG_PARAVIRT
22984 #include <asm/asm-offsets.h>
22985@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
22986 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
22987 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
22988 L3_START_KERNEL = pud_index(__START_KERNEL_map)
22989+L4_VMALLOC_START = pgd_index(VMALLOC_START)
22990+L3_VMALLOC_START = pud_index(VMALLOC_START)
22991+L4_VMALLOC_END = pgd_index(VMALLOC_END)
22992+L3_VMALLOC_END = pud_index(VMALLOC_END)
22993+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
22994+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
22995
22996 .text
22997 __HEAD
22998@@ -89,11 +97,23 @@ startup_64:
22999 * Fixup the physical addresses in the page table
23000 */
23001 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
23002+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
23003+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
23004+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
23005+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
23006+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
23007
23008- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
23009- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
23010+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
23011+#ifndef CONFIG_XEN
23012+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
23013+#endif
23014
23015- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
23016+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
23017+
23018+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
23019+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
23020+
23021+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
23022
23023 /*
23024 * Set up the identity mapping for the switchover. These
23025@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64)
23026 movq $(init_level4_pgt - __START_KERNEL_map), %rax
23027 1:
23028
23029- /* Enable PAE mode and PGE */
23030- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
23031+ /* Enable PAE mode and PSE/PGE */
23032+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
23033 movq %rcx, %cr4
23034
23035 /* Setup early boot stage 4 level pagetables. */
23036@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64)
23037 movl $MSR_EFER, %ecx
23038 rdmsr
23039 btsl $_EFER_SCE, %eax /* Enable System Call */
23040- btl $20,%edi /* No Execute supported? */
23041+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
23042 jnc 1f
23043 btsl $_EFER_NX, %eax
23044 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
23045+ leaq init_level4_pgt(%rip), %rdi
23046+#ifndef CONFIG_EFI
23047+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
23048+#endif
23049+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
23050+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
23051+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
23052+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
23053 1: wrmsr /* Make changes effective */
23054
23055 /* Setup cr0 */
23056@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64)
23057 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
23058 * address given in m16:64.
23059 */
23060+ pax_set_fptr_mask
23061 movq initial_code(%rip),%rax
23062 pushq $0 # fake return address to stop unwinder
23063 pushq $__KERNEL_CS # set correct cs
23064@@ -388,7 +417,7 @@ ENTRY(early_idt_handler)
23065 call dump_stack
23066 #ifdef CONFIG_KALLSYMS
23067 leaq early_idt_ripmsg(%rip),%rdi
23068- movq 40(%rsp),%rsi # %rip again
23069+ movq 88(%rsp),%rsi # %rip again
23070 call __print_symbol
23071 #endif
23072 #endif /* EARLY_PRINTK */
23073@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler)
23074 early_recursion_flag:
23075 .long 0
23076
23077+ .section .rodata,"a",@progbits
23078 #ifdef CONFIG_EARLY_PRINTK
23079 early_idt_msg:
23080 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
23081@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt)
23082 NEXT_PAGE(early_dynamic_pgts)
23083 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
23084
23085- .data
23086+ .section .rodata,"a",@progbits
23087
23088-#ifndef CONFIG_XEN
23089 NEXT_PAGE(init_level4_pgt)
23090- .fill 512,8,0
23091-#else
23092-NEXT_PAGE(init_level4_pgt)
23093- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23094 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
23095 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23096+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
23097+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
23098+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
23099+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
23100+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
23101+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23102 .org init_level4_pgt + L4_START_KERNEL*8, 0
23103 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
23104 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
23105
23106+#ifdef CONFIG_PAX_PER_CPU_PGD
23107+NEXT_PAGE(cpu_pgd)
23108+ .rept 2*NR_CPUS
23109+ .fill 512,8,0
23110+ .endr
23111+#endif
23112+
23113 NEXT_PAGE(level3_ident_pgt)
23114 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23115+#ifdef CONFIG_XEN
23116 .fill 511, 8, 0
23117+#else
23118+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
23119+ .fill 510,8,0
23120+#endif
23121+
23122+NEXT_PAGE(level3_vmalloc_start_pgt)
23123+ .fill 512,8,0
23124+
23125+NEXT_PAGE(level3_vmalloc_end_pgt)
23126+ .fill 512,8,0
23127+
23128+NEXT_PAGE(level3_vmemmap_pgt)
23129+ .fill L3_VMEMMAP_START,8,0
23130+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23131+
23132 NEXT_PAGE(level2_ident_pgt)
23133- /* Since I easily can, map the first 1G.
23134+ /* Since I easily can, map the first 2G.
23135 * Don't set NX because code runs from these pages.
23136 */
23137- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
23138-#endif
23139+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
23140
23141 NEXT_PAGE(level3_kernel_pgt)
23142 .fill L3_START_KERNEL,8,0
23143@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt)
23144 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
23145 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
23146
23147+NEXT_PAGE(level2_vmemmap_pgt)
23148+ .fill 512,8,0
23149+
23150 NEXT_PAGE(level2_kernel_pgt)
23151 /*
23152 * 512 MB kernel mapping. We spend a full page on this pagetable
23153@@ -488,39 +544,70 @@ NEXT_PAGE(level2_kernel_pgt)
23154 KERNEL_IMAGE_SIZE/PMD_SIZE)
23155
23156 NEXT_PAGE(level2_fixmap_pgt)
23157- .fill 506,8,0
23158- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
23159- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
23160- .fill 5,8,0
23161+ .fill 507,8,0
23162+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
23163+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
23164+ .fill 4,8,0
23165
23166-NEXT_PAGE(level1_fixmap_pgt)
23167+NEXT_PAGE(level1_vsyscall_pgt)
23168 .fill 512,8,0
23169
23170 #undef PMDS
23171
23172- .data
23173+ .align PAGE_SIZE
23174+ENTRY(cpu_gdt_table)
23175+ .rept NR_CPUS
23176+ .quad 0x0000000000000000 /* NULL descriptor */
23177+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
23178+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
23179+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
23180+ .quad 0x00cffb000000ffff /* __USER32_CS */
23181+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
23182+ .quad 0x00affb000000ffff /* __USER_CS */
23183+
23184+#ifdef CONFIG_PAX_KERNEXEC
23185+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
23186+#else
23187+ .quad 0x0 /* unused */
23188+#endif
23189+
23190+ .quad 0,0 /* TSS */
23191+ .quad 0,0 /* LDT */
23192+ .quad 0,0,0 /* three TLS descriptors */
23193+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
23194+ /* asm/segment.h:GDT_ENTRIES must match this */
23195+
23196+#ifdef CONFIG_PAX_MEMORY_UDEREF
23197+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
23198+#else
23199+ .quad 0x0 /* unused */
23200+#endif
23201+
23202+ /* zero the remaining page */
23203+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
23204+ .endr
23205+
23206 .align 16
23207 .globl early_gdt_descr
23208 early_gdt_descr:
23209 .word GDT_ENTRIES*8-1
23210 early_gdt_descr_base:
23211- .quad INIT_PER_CPU_VAR(gdt_page)
23212+ .quad cpu_gdt_table
23213
23214 ENTRY(phys_base)
23215 /* This must match the first entry in level2_kernel_pgt */
23216 .quad 0x0000000000000000
23217
23218 #include "../../x86/xen/xen-head.S"
23219-
23220- .section .bss, "aw", @nobits
23221+
23222+ .section .rodata,"a",@progbits
23223+NEXT_PAGE(empty_zero_page)
23224+ .skip PAGE_SIZE
23225+
23226 .align PAGE_SIZE
23227 ENTRY(idt_table)
23228- .skip IDT_ENTRIES * 16
23229+ .fill 512,8,0
23230
23231 .align L1_CACHE_BYTES
23232 ENTRY(nmi_idt_table)
23233- .skip IDT_ENTRIES * 16
23234-
23235- __PAGE_ALIGNED_BSS
23236-NEXT_PAGE(empty_zero_page)
23237- .skip PAGE_SIZE
23238+ .fill 512,8,0
23239diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
23240index 0fa6912..b37438b 100644
23241--- a/arch/x86/kernel/i386_ksyms_32.c
23242+++ b/arch/x86/kernel/i386_ksyms_32.c
23243@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
23244 EXPORT_SYMBOL(cmpxchg8b_emu);
23245 #endif
23246
23247+EXPORT_SYMBOL_GPL(cpu_gdt_table);
23248+
23249 /* Networking helper routines. */
23250 EXPORT_SYMBOL(csum_partial_copy_generic);
23251+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
23252+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
23253
23254 EXPORT_SYMBOL(__get_user_1);
23255 EXPORT_SYMBOL(__get_user_2);
23256@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
23257
23258 EXPORT_SYMBOL(csum_partial);
23259 EXPORT_SYMBOL(empty_zero_page);
23260+
23261+#ifdef CONFIG_PAX_KERNEXEC
23262+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
23263+#endif
23264+
23265+#ifdef CONFIG_PAX_PER_CPU_PGD
23266+EXPORT_SYMBOL(cpu_pgd);
23267+#endif
23268diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
23269index f7ea30d..6318acc 100644
23270--- a/arch/x86/kernel/i387.c
23271+++ b/arch/x86/kernel/i387.c
23272@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
23273 static inline bool interrupted_user_mode(void)
23274 {
23275 struct pt_regs *regs = get_irq_regs();
23276- return regs && user_mode_vm(regs);
23277+ return regs && user_mode(regs);
23278 }
23279
23280 /*
23281diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
23282index 9a5c460..84868423 100644
23283--- a/arch/x86/kernel/i8259.c
23284+++ b/arch/x86/kernel/i8259.c
23285@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
23286 static void make_8259A_irq(unsigned int irq)
23287 {
23288 disable_irq_nosync(irq);
23289- io_apic_irqs &= ~(1<<irq);
23290+ io_apic_irqs &= ~(1UL<<irq);
23291 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
23292 i8259A_chip.name);
23293 enable_irq(irq);
23294@@ -209,7 +209,7 @@ spurious_8259A_irq:
23295 "spurious 8259A interrupt: IRQ%d.\n", irq);
23296 spurious_irq_mask |= irqmask;
23297 }
23298- atomic_inc(&irq_err_count);
23299+ atomic_inc_unchecked(&irq_err_count);
23300 /*
23301 * Theoretically we do not have to handle this IRQ,
23302 * but in Linux this does not cause problems and is
23303@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
23304 /* (slave's support for AEOI in flat mode is to be investigated) */
23305 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
23306
23307+ pax_open_kernel();
23308 if (auto_eoi)
23309 /*
23310 * In AEOI mode we just have to mask the interrupt
23311 * when acking.
23312 */
23313- i8259A_chip.irq_mask_ack = disable_8259A_irq;
23314+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
23315 else
23316- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
23317+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
23318+ pax_close_kernel();
23319
23320 udelay(100); /* wait for 8259A to initialize */
23321
23322diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
23323index a979b5b..1d6db75 100644
23324--- a/arch/x86/kernel/io_delay.c
23325+++ b/arch/x86/kernel/io_delay.c
23326@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
23327 * Quirk table for systems that misbehave (lock up, etc.) if port
23328 * 0x80 is used:
23329 */
23330-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
23331+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
23332 {
23333 .callback = dmi_io_delay_0xed_port,
23334 .ident = "Compaq Presario V6000",
23335diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
23336index 4ddaf66..6292f4e 100644
23337--- a/arch/x86/kernel/ioport.c
23338+++ b/arch/x86/kernel/ioport.c
23339@@ -6,6 +6,7 @@
23340 #include <linux/sched.h>
23341 #include <linux/kernel.h>
23342 #include <linux/capability.h>
23343+#include <linux/security.h>
23344 #include <linux/errno.h>
23345 #include <linux/types.h>
23346 #include <linux/ioport.h>
23347@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
23348
23349 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
23350 return -EINVAL;
23351+#ifdef CONFIG_GRKERNSEC_IO
23352+ if (turn_on && grsec_disable_privio) {
23353+ gr_handle_ioperm();
23354+ return -EPERM;
23355+ }
23356+#endif
23357 if (turn_on && !capable(CAP_SYS_RAWIO))
23358 return -EPERM;
23359
23360@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
23361 * because the ->io_bitmap_max value must match the bitmap
23362 * contents:
23363 */
23364- tss = &per_cpu(init_tss, get_cpu());
23365+ tss = init_tss + get_cpu();
23366
23367 if (turn_on)
23368 bitmap_clear(t->io_bitmap_ptr, from, num);
23369@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
23370 return -EINVAL;
23371 /* Trying to gain more privileges? */
23372 if (level > old) {
23373+#ifdef CONFIG_GRKERNSEC_IO
23374+ if (grsec_disable_privio) {
23375+ gr_handle_iopl();
23376+ return -EPERM;
23377+ }
23378+#endif
23379 if (!capable(CAP_SYS_RAWIO))
23380 return -EPERM;
23381 }
23382diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
23383index ac0631d..ff7cb62 100644
23384--- a/arch/x86/kernel/irq.c
23385+++ b/arch/x86/kernel/irq.c
23386@@ -18,7 +18,7 @@
23387 #include <asm/mce.h>
23388 #include <asm/hw_irq.h>
23389
23390-atomic_t irq_err_count;
23391+atomic_unchecked_t irq_err_count;
23392
23393 /* Function pointer for generic interrupt vector handling */
23394 void (*x86_platform_ipi_callback)(void) = NULL;
23395@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
23396 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
23397 seq_printf(p, " Machine check polls\n");
23398 #endif
23399- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
23400+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
23401 #if defined(CONFIG_X86_IO_APIC)
23402- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
23403+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
23404 #endif
23405 return 0;
23406 }
23407@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
23408
23409 u64 arch_irq_stat(void)
23410 {
23411- u64 sum = atomic_read(&irq_err_count);
23412+ u64 sum = atomic_read_unchecked(&irq_err_count);
23413 return sum;
23414 }
23415
23416diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
23417index 344faf8..355f60d 100644
23418--- a/arch/x86/kernel/irq_32.c
23419+++ b/arch/x86/kernel/irq_32.c
23420@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
23421 __asm__ __volatile__("andl %%esp,%0" :
23422 "=r" (sp) : "0" (THREAD_SIZE - 1));
23423
23424- return sp < (sizeof(struct thread_info) + STACK_WARN);
23425+ return sp < STACK_WARN;
23426 }
23427
23428 static void print_stack_overflow(void)
23429@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
23430 * per-CPU IRQ handling contexts (thread information and stack)
23431 */
23432 union irq_ctx {
23433- struct thread_info tinfo;
23434- u32 stack[THREAD_SIZE/sizeof(u32)];
23435+ unsigned long previous_esp;
23436+ u32 stack[THREAD_SIZE/sizeof(u32)];
23437 } __attribute__((aligned(THREAD_SIZE)));
23438
23439 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
23440@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
23441 static inline int
23442 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23443 {
23444- union irq_ctx *curctx, *irqctx;
23445+ union irq_ctx *irqctx;
23446 u32 *isp, arg1, arg2;
23447
23448- curctx = (union irq_ctx *) current_thread_info();
23449 irqctx = __this_cpu_read(hardirq_ctx);
23450
23451 /*
23452@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23453 * handler) we can't do that and just have to keep using the
23454 * current stack (which is the irq stack already after all)
23455 */
23456- if (unlikely(curctx == irqctx))
23457+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
23458 return 0;
23459
23460 /* build the stack frame on the IRQ stack */
23461- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
23462- irqctx->tinfo.task = curctx->tinfo.task;
23463- irqctx->tinfo.previous_esp = current_stack_pointer;
23464+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
23465+ irqctx->previous_esp = current_stack_pointer;
23466
23467- /* Copy the preempt_count so that the [soft]irq checks work. */
23468- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
23469+#ifdef CONFIG_PAX_MEMORY_UDEREF
23470+ __set_fs(MAKE_MM_SEG(0));
23471+#endif
23472
23473 if (unlikely(overflow))
23474 call_on_stack(print_stack_overflow, isp);
23475@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23476 : "0" (irq), "1" (desc), "2" (isp),
23477 "D" (desc->handle_irq)
23478 : "memory", "cc", "ecx");
23479+
23480+#ifdef CONFIG_PAX_MEMORY_UDEREF
23481+ __set_fs(current_thread_info()->addr_limit);
23482+#endif
23483+
23484 return 1;
23485 }
23486
23487@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23488 */
23489 void __cpuinit irq_ctx_init(int cpu)
23490 {
23491- union irq_ctx *irqctx;
23492-
23493 if (per_cpu(hardirq_ctx, cpu))
23494 return;
23495
23496- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
23497- THREADINFO_GFP,
23498- THREAD_SIZE_ORDER));
23499- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
23500- irqctx->tinfo.cpu = cpu;
23501- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
23502- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
23503-
23504- per_cpu(hardirq_ctx, cpu) = irqctx;
23505-
23506- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
23507- THREADINFO_GFP,
23508- THREAD_SIZE_ORDER));
23509- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
23510- irqctx->tinfo.cpu = cpu;
23511- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
23512-
23513- per_cpu(softirq_ctx, cpu) = irqctx;
23514+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
23515+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
23516+
23517+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
23518+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
23519
23520 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
23521 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
23522@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
23523 asmlinkage void do_softirq(void)
23524 {
23525 unsigned long flags;
23526- struct thread_info *curctx;
23527 union irq_ctx *irqctx;
23528 u32 *isp;
23529
23530@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
23531 local_irq_save(flags);
23532
23533 if (local_softirq_pending()) {
23534- curctx = current_thread_info();
23535 irqctx = __this_cpu_read(softirq_ctx);
23536- irqctx->tinfo.task = curctx->task;
23537- irqctx->tinfo.previous_esp = current_stack_pointer;
23538+ irqctx->previous_esp = current_stack_pointer;
23539
23540 /* build the stack frame on the softirq stack */
23541- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
23542+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
23543+
23544+#ifdef CONFIG_PAX_MEMORY_UDEREF
23545+ __set_fs(MAKE_MM_SEG(0));
23546+#endif
23547
23548 call_on_stack(__do_softirq, isp);
23549+
23550+#ifdef CONFIG_PAX_MEMORY_UDEREF
23551+ __set_fs(current_thread_info()->addr_limit);
23552+#endif
23553+
23554 /*
23555 * Shouldn't happen, we returned above if in_interrupt():
23556 */
23557@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
23558 if (unlikely(!desc))
23559 return false;
23560
23561- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
23562+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
23563 if (unlikely(overflow))
23564 print_stack_overflow();
23565 desc->handle_irq(irq, desc);
23566diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
23567index d04d3ec..ea4b374 100644
23568--- a/arch/x86/kernel/irq_64.c
23569+++ b/arch/x86/kernel/irq_64.c
23570@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
23571 u64 estack_top, estack_bottom;
23572 u64 curbase = (u64)task_stack_page(current);
23573
23574- if (user_mode_vm(regs))
23575+ if (user_mode(regs))
23576 return;
23577
23578 if (regs->sp >= curbase + sizeof(struct thread_info) +
23579diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
23580index dc1404b..bbc43e7 100644
23581--- a/arch/x86/kernel/kdebugfs.c
23582+++ b/arch/x86/kernel/kdebugfs.c
23583@@ -27,7 +27,7 @@ struct setup_data_node {
23584 u32 len;
23585 };
23586
23587-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
23588+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
23589 size_t count, loff_t *ppos)
23590 {
23591 struct setup_data_node *node = file->private_data;
23592diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
23593index 836f832..a8bda67 100644
23594--- a/arch/x86/kernel/kgdb.c
23595+++ b/arch/x86/kernel/kgdb.c
23596@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
23597 #ifdef CONFIG_X86_32
23598 switch (regno) {
23599 case GDB_SS:
23600- if (!user_mode_vm(regs))
23601+ if (!user_mode(regs))
23602 *(unsigned long *)mem = __KERNEL_DS;
23603 break;
23604 case GDB_SP:
23605- if (!user_mode_vm(regs))
23606+ if (!user_mode(regs))
23607 *(unsigned long *)mem = kernel_stack_pointer(regs);
23608 break;
23609 case GDB_GS:
23610@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
23611 bp->attr.bp_addr = breakinfo[breakno].addr;
23612 bp->attr.bp_len = breakinfo[breakno].len;
23613 bp->attr.bp_type = breakinfo[breakno].type;
23614- info->address = breakinfo[breakno].addr;
23615+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
23616+ info->address = ktla_ktva(breakinfo[breakno].addr);
23617+ else
23618+ info->address = breakinfo[breakno].addr;
23619 info->len = breakinfo[breakno].len;
23620 info->type = breakinfo[breakno].type;
23621 val = arch_install_hw_breakpoint(bp);
23622@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
23623 case 'k':
23624 /* clear the trace bit */
23625 linux_regs->flags &= ~X86_EFLAGS_TF;
23626- atomic_set(&kgdb_cpu_doing_single_step, -1);
23627+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
23628
23629 /* set the trace bit if we're stepping */
23630 if (remcomInBuffer[0] == 's') {
23631 linux_regs->flags |= X86_EFLAGS_TF;
23632- atomic_set(&kgdb_cpu_doing_single_step,
23633+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
23634 raw_smp_processor_id());
23635 }
23636
23637@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
23638
23639 switch (cmd) {
23640 case DIE_DEBUG:
23641- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
23642+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
23643 if (user_mode(regs))
23644 return single_step_cont(regs, args);
23645 break;
23646@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
23647 #endif /* CONFIG_DEBUG_RODATA */
23648
23649 bpt->type = BP_BREAKPOINT;
23650- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
23651+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
23652 BREAK_INSTR_SIZE);
23653 if (err)
23654 return err;
23655- err = probe_kernel_write((char *)bpt->bpt_addr,
23656+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
23657 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
23658 #ifdef CONFIG_DEBUG_RODATA
23659 if (!err)
23660@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
23661 return -EBUSY;
23662 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
23663 BREAK_INSTR_SIZE);
23664- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
23665+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
23666 if (err)
23667 return err;
23668 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
23669@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
23670 if (mutex_is_locked(&text_mutex))
23671 goto knl_write;
23672 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
23673- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
23674+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
23675 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
23676 goto knl_write;
23677 return err;
23678 knl_write:
23679 #endif /* CONFIG_DEBUG_RODATA */
23680- return probe_kernel_write((char *)bpt->bpt_addr,
23681+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
23682 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
23683 }
23684
23685diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
23686index 211bce4..6e2580a 100644
23687--- a/arch/x86/kernel/kprobes/core.c
23688+++ b/arch/x86/kernel/kprobes/core.c
23689@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
23690 s32 raddr;
23691 } __packed *insn;
23692
23693- insn = (struct __arch_relative_insn *)from;
23694+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
23695+
23696+ pax_open_kernel();
23697 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
23698 insn->op = op;
23699+ pax_close_kernel();
23700 }
23701
23702 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
23703@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
23704 kprobe_opcode_t opcode;
23705 kprobe_opcode_t *orig_opcodes = opcodes;
23706
23707- if (search_exception_tables((unsigned long)opcodes))
23708+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
23709 return 0; /* Page fault may occur on this address. */
23710
23711 retry:
23712@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
23713 * for the first byte, we can recover the original instruction
23714 * from it and kp->opcode.
23715 */
23716- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
23717+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
23718 buf[0] = kp->opcode;
23719- return (unsigned long)buf;
23720+ return ktva_ktla((unsigned long)buf);
23721 }
23722
23723 /*
23724@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
23725 /* Another subsystem puts a breakpoint, failed to recover */
23726 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
23727 return 0;
23728+ pax_open_kernel();
23729 memcpy(dest, insn.kaddr, insn.length);
23730+ pax_close_kernel();
23731
23732 #ifdef CONFIG_X86_64
23733 if (insn_rip_relative(&insn)) {
23734@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
23735 return 0;
23736 }
23737 disp = (u8 *) dest + insn_offset_displacement(&insn);
23738+ pax_open_kernel();
23739 *(s32 *) disp = (s32) newdisp;
23740+ pax_close_kernel();
23741 }
23742 #endif
23743 return insn.length;
23744@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
23745 * nor set current_kprobe, because it doesn't use single
23746 * stepping.
23747 */
23748- regs->ip = (unsigned long)p->ainsn.insn;
23749+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
23750 preempt_enable_no_resched();
23751 return;
23752 }
23753@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
23754 regs->flags &= ~X86_EFLAGS_IF;
23755 /* single step inline if the instruction is an int3 */
23756 if (p->opcode == BREAKPOINT_INSTRUCTION)
23757- regs->ip = (unsigned long)p->addr;
23758+ regs->ip = ktla_ktva((unsigned long)p->addr);
23759 else
23760- regs->ip = (unsigned long)p->ainsn.insn;
23761+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
23762 }
23763
23764 /*
23765@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
23766 setup_singlestep(p, regs, kcb, 0);
23767 return 1;
23768 }
23769- } else if (*addr != BREAKPOINT_INSTRUCTION) {
23770+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
23771 /*
23772 * The breakpoint instruction was removed right
23773 * after we hit it. Another cpu has removed
23774@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
23775 " movq %rax, 152(%rsp)\n"
23776 RESTORE_REGS_STRING
23777 " popfq\n"
23778+#ifdef KERNEXEC_PLUGIN
23779+ " btsq $63,(%rsp)\n"
23780+#endif
23781 #else
23782 " pushf\n"
23783 SAVE_REGS_STRING
23784@@ -779,7 +789,7 @@ static void __kprobes
23785 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
23786 {
23787 unsigned long *tos = stack_addr(regs);
23788- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
23789+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
23790 unsigned long orig_ip = (unsigned long)p->addr;
23791 kprobe_opcode_t *insn = p->ainsn.insn;
23792
23793@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
23794 struct die_args *args = data;
23795 int ret = NOTIFY_DONE;
23796
23797- if (args->regs && user_mode_vm(args->regs))
23798+ if (args->regs && user_mode(args->regs))
23799 return ret;
23800
23801 switch (val) {
23802diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
23803index 76dc6f0..66bdfc3 100644
23804--- a/arch/x86/kernel/kprobes/opt.c
23805+++ b/arch/x86/kernel/kprobes/opt.c
23806@@ -79,6 +79,7 @@ found:
23807 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
23808 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
23809 {
23810+ pax_open_kernel();
23811 #ifdef CONFIG_X86_64
23812 *addr++ = 0x48;
23813 *addr++ = 0xbf;
23814@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
23815 *addr++ = 0xb8;
23816 #endif
23817 *(unsigned long *)addr = val;
23818+ pax_close_kernel();
23819 }
23820
23821 static void __used __kprobes kprobes_optinsn_template_holder(void)
23822@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23823 * Verify if the address gap is in 2GB range, because this uses
23824 * a relative jump.
23825 */
23826- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
23827+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
23828 if (abs(rel) > 0x7fffffff)
23829 return -ERANGE;
23830
23831@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23832 op->optinsn.size = ret;
23833
23834 /* Copy arch-dep-instance from template */
23835- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
23836+ pax_open_kernel();
23837+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
23838+ pax_close_kernel();
23839
23840 /* Set probe information */
23841 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
23842
23843 /* Set probe function call */
23844- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
23845+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
23846
23847 /* Set returning jmp instruction at the tail of out-of-line buffer */
23848- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
23849+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
23850 (u8 *)op->kp.addr + op->optinsn.size);
23851
23852 flush_icache_range((unsigned long) buf,
23853@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
23854 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
23855
23856 /* Backup instructions which will be replaced by jump address */
23857- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
23858+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
23859 RELATIVE_ADDR_SIZE);
23860
23861 insn_buf[0] = RELATIVEJUMP_OPCODE;
23862@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
23863 /* This kprobe is really able to run optimized path. */
23864 op = container_of(p, struct optimized_kprobe, kp);
23865 /* Detour through copied instructions */
23866- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
23867+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
23868 if (!reenter)
23869 reset_current_kprobe();
23870 preempt_enable_no_resched();
23871diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
23872index cd6d9a5..16245a4 100644
23873--- a/arch/x86/kernel/kvm.c
23874+++ b/arch/x86/kernel/kvm.c
23875@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
23876 return NOTIFY_OK;
23877 }
23878
23879-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
23880+static struct notifier_block kvm_cpu_notifier = {
23881 .notifier_call = kvm_cpu_notify,
23882 };
23883 #endif
23884diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
23885index ebc9873..1b9724b 100644
23886--- a/arch/x86/kernel/ldt.c
23887+++ b/arch/x86/kernel/ldt.c
23888@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
23889 if (reload) {
23890 #ifdef CONFIG_SMP
23891 preempt_disable();
23892- load_LDT(pc);
23893+ load_LDT_nolock(pc);
23894 if (!cpumask_equal(mm_cpumask(current->mm),
23895 cpumask_of(smp_processor_id())))
23896 smp_call_function(flush_ldt, current->mm, 1);
23897 preempt_enable();
23898 #else
23899- load_LDT(pc);
23900+ load_LDT_nolock(pc);
23901 #endif
23902 }
23903 if (oldsize) {
23904@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
23905 return err;
23906
23907 for (i = 0; i < old->size; i++)
23908- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
23909+ write_ldt_entry(new->ldt, i, old->ldt + i);
23910 return 0;
23911 }
23912
23913@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
23914 retval = copy_ldt(&mm->context, &old_mm->context);
23915 mutex_unlock(&old_mm->context.lock);
23916 }
23917+
23918+ if (tsk == current) {
23919+ mm->context.vdso = 0;
23920+
23921+#ifdef CONFIG_X86_32
23922+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23923+ mm->context.user_cs_base = 0UL;
23924+ mm->context.user_cs_limit = ~0UL;
23925+
23926+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
23927+ cpus_clear(mm->context.cpu_user_cs_mask);
23928+#endif
23929+
23930+#endif
23931+#endif
23932+
23933+ }
23934+
23935 return retval;
23936 }
23937
23938@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
23939 }
23940 }
23941
23942+#ifdef CONFIG_PAX_SEGMEXEC
23943+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
23944+ error = -EINVAL;
23945+ goto out_unlock;
23946+ }
23947+#endif
23948+
23949 fill_ldt(&ldt, &ldt_info);
23950 if (oldmode)
23951 ldt.avl = 0;
23952diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
23953index 5b19e4d..6476a76 100644
23954--- a/arch/x86/kernel/machine_kexec_32.c
23955+++ b/arch/x86/kernel/machine_kexec_32.c
23956@@ -26,7 +26,7 @@
23957 #include <asm/cacheflush.h>
23958 #include <asm/debugreg.h>
23959
23960-static void set_idt(void *newidt, __u16 limit)
23961+static void set_idt(struct desc_struct *newidt, __u16 limit)
23962 {
23963 struct desc_ptr curidt;
23964
23965@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
23966 }
23967
23968
23969-static void set_gdt(void *newgdt, __u16 limit)
23970+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
23971 {
23972 struct desc_ptr curgdt;
23973
23974@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
23975 }
23976
23977 control_page = page_address(image->control_code_page);
23978- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
23979+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
23980
23981 relocate_kernel_ptr = control_page;
23982 page_list[PA_CONTROL_PAGE] = __pa(control_page);
23983diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
23984index 22db92b..d546bec 100644
23985--- a/arch/x86/kernel/microcode_core.c
23986+++ b/arch/x86/kernel/microcode_core.c
23987@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
23988 return NOTIFY_OK;
23989 }
23990
23991-static struct notifier_block __refdata mc_cpu_notifier = {
23992+static struct notifier_block mc_cpu_notifier = {
23993 .notifier_call = mc_cpu_callback,
23994 };
23995
23996diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
23997index 5fb2ceb..3ae90bb 100644
23998--- a/arch/x86/kernel/microcode_intel.c
23999+++ b/arch/x86/kernel/microcode_intel.c
24000@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
24001
24002 static int get_ucode_user(void *to, const void *from, size_t n)
24003 {
24004- return copy_from_user(to, from, n);
24005+ return copy_from_user(to, (const void __force_user *)from, n);
24006 }
24007
24008 static enum ucode_state
24009 request_microcode_user(int cpu, const void __user *buf, size_t size)
24010 {
24011- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
24012+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
24013 }
24014
24015 static void microcode_fini_cpu(int cpu)
24016diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
24017index 216a4d7..228255a 100644
24018--- a/arch/x86/kernel/module.c
24019+++ b/arch/x86/kernel/module.c
24020@@ -43,15 +43,60 @@ do { \
24021 } while (0)
24022 #endif
24023
24024-void *module_alloc(unsigned long size)
24025+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
24026 {
24027- if (PAGE_ALIGN(size) > MODULES_LEN)
24028+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
24029 return NULL;
24030 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
24031- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
24032+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
24033 -1, __builtin_return_address(0));
24034 }
24035
24036+void *module_alloc(unsigned long size)
24037+{
24038+
24039+#ifdef CONFIG_PAX_KERNEXEC
24040+ return __module_alloc(size, PAGE_KERNEL);
24041+#else
24042+ return __module_alloc(size, PAGE_KERNEL_EXEC);
24043+#endif
24044+
24045+}
24046+
24047+#ifdef CONFIG_PAX_KERNEXEC
24048+#ifdef CONFIG_X86_32
24049+void *module_alloc_exec(unsigned long size)
24050+{
24051+ struct vm_struct *area;
24052+
24053+ if (size == 0)
24054+ return NULL;
24055+
24056+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
24057+ return area ? area->addr : NULL;
24058+}
24059+EXPORT_SYMBOL(module_alloc_exec);
24060+
24061+void module_free_exec(struct module *mod, void *module_region)
24062+{
24063+ vunmap(module_region);
24064+}
24065+EXPORT_SYMBOL(module_free_exec);
24066+#else
24067+void module_free_exec(struct module *mod, void *module_region)
24068+{
24069+ module_free(mod, module_region);
24070+}
24071+EXPORT_SYMBOL(module_free_exec);
24072+
24073+void *module_alloc_exec(unsigned long size)
24074+{
24075+ return __module_alloc(size, PAGE_KERNEL_RX);
24076+}
24077+EXPORT_SYMBOL(module_alloc_exec);
24078+#endif
24079+#endif
24080+
24081 #ifdef CONFIG_X86_32
24082 int apply_relocate(Elf32_Shdr *sechdrs,
24083 const char *strtab,
24084@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24085 unsigned int i;
24086 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
24087 Elf32_Sym *sym;
24088- uint32_t *location;
24089+ uint32_t *plocation, location;
24090
24091 DEBUGP("Applying relocate section %u to %u\n",
24092 relsec, sechdrs[relsec].sh_info);
24093 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
24094 /* This is where to make the change */
24095- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
24096- + rel[i].r_offset;
24097+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
24098+ location = (uint32_t)plocation;
24099+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
24100+ plocation = ktla_ktva((void *)plocation);
24101 /* This is the symbol it is referring to. Note that all
24102 undefined symbols have been resolved. */
24103 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
24104@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24105 switch (ELF32_R_TYPE(rel[i].r_info)) {
24106 case R_386_32:
24107 /* We add the value into the location given */
24108- *location += sym->st_value;
24109+ pax_open_kernel();
24110+ *plocation += sym->st_value;
24111+ pax_close_kernel();
24112 break;
24113 case R_386_PC32:
24114 /* Add the value, subtract its position */
24115- *location += sym->st_value - (uint32_t)location;
24116+ pax_open_kernel();
24117+ *plocation += sym->st_value - location;
24118+ pax_close_kernel();
24119 break;
24120 default:
24121 pr_err("%s: Unknown relocation: %u\n",
24122@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
24123 case R_X86_64_NONE:
24124 break;
24125 case R_X86_64_64:
24126+ pax_open_kernel();
24127 *(u64 *)loc = val;
24128+ pax_close_kernel();
24129 break;
24130 case R_X86_64_32:
24131+ pax_open_kernel();
24132 *(u32 *)loc = val;
24133+ pax_close_kernel();
24134 if (val != *(u32 *)loc)
24135 goto overflow;
24136 break;
24137 case R_X86_64_32S:
24138+ pax_open_kernel();
24139 *(s32 *)loc = val;
24140+ pax_close_kernel();
24141 if ((s64)val != *(s32 *)loc)
24142 goto overflow;
24143 break;
24144 case R_X86_64_PC32:
24145 val -= (u64)loc;
24146+ pax_open_kernel();
24147 *(u32 *)loc = val;
24148+ pax_close_kernel();
24149+
24150 #if 0
24151 if ((s64)val != *(s32 *)loc)
24152 goto overflow;
24153diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
24154index ce13049..e2e9c3c 100644
24155--- a/arch/x86/kernel/msr.c
24156+++ b/arch/x86/kernel/msr.c
24157@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
24158 return notifier_from_errno(err);
24159 }
24160
24161-static struct notifier_block __refdata msr_class_cpu_notifier = {
24162+static struct notifier_block msr_class_cpu_notifier = {
24163 .notifier_call = msr_class_cpu_callback,
24164 };
24165
24166diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
24167index 6030805..2d33f21 100644
24168--- a/arch/x86/kernel/nmi.c
24169+++ b/arch/x86/kernel/nmi.c
24170@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
24171 return handled;
24172 }
24173
24174-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
24175+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
24176 {
24177 struct nmi_desc *desc = nmi_to_desc(type);
24178 unsigned long flags;
24179@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
24180 * event confuses some handlers (kdump uses this flag)
24181 */
24182 if (action->flags & NMI_FLAG_FIRST)
24183- list_add_rcu(&action->list, &desc->head);
24184+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
24185 else
24186- list_add_tail_rcu(&action->list, &desc->head);
24187+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
24188
24189 spin_unlock_irqrestore(&desc->lock, flags);
24190 return 0;
24191@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
24192 if (!strcmp(n->name, name)) {
24193 WARN(in_nmi(),
24194 "Trying to free NMI (%s) from NMI context!\n", n->name);
24195- list_del_rcu(&n->list);
24196+ pax_list_del_rcu((struct list_head *)&n->list);
24197 break;
24198 }
24199 }
24200@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
24201 dotraplinkage notrace __kprobes void
24202 do_nmi(struct pt_regs *regs, long error_code)
24203 {
24204+
24205+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24206+ if (!user_mode(regs)) {
24207+ unsigned long cs = regs->cs & 0xFFFF;
24208+ unsigned long ip = ktva_ktla(regs->ip);
24209+
24210+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
24211+ regs->ip = ip;
24212+ }
24213+#endif
24214+
24215 nmi_nesting_preprocess(regs);
24216
24217 nmi_enter();
24218diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
24219index 6d9582e..f746287 100644
24220--- a/arch/x86/kernel/nmi_selftest.c
24221+++ b/arch/x86/kernel/nmi_selftest.c
24222@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
24223 {
24224 /* trap all the unknown NMIs we may generate */
24225 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
24226- __initdata);
24227+ __initconst);
24228 }
24229
24230 static void __init cleanup_nmi_testsuite(void)
24231@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
24232 unsigned long timeout;
24233
24234 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
24235- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
24236+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
24237 nmi_fail = FAILURE;
24238 return;
24239 }
24240diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
24241index 676b8c7..870ba04 100644
24242--- a/arch/x86/kernel/paravirt-spinlocks.c
24243+++ b/arch/x86/kernel/paravirt-spinlocks.c
24244@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
24245 arch_spin_lock(lock);
24246 }
24247
24248-struct pv_lock_ops pv_lock_ops = {
24249+struct pv_lock_ops pv_lock_ops __read_only = {
24250 #ifdef CONFIG_SMP
24251 .spin_is_locked = __ticket_spin_is_locked,
24252 .spin_is_contended = __ticket_spin_is_contended,
24253diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
24254index cd6de64..27c6af0 100644
24255--- a/arch/x86/kernel/paravirt.c
24256+++ b/arch/x86/kernel/paravirt.c
24257@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
24258 {
24259 return x;
24260 }
24261+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24262+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
24263+#endif
24264
24265 void __init default_banner(void)
24266 {
24267@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
24268 if (opfunc == NULL)
24269 /* If there's no function, patch it with a ud2a (BUG) */
24270 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
24271- else if (opfunc == _paravirt_nop)
24272+ else if (opfunc == (void *)_paravirt_nop)
24273 /* If the operation is a nop, then nop the callsite */
24274 ret = paravirt_patch_nop();
24275
24276 /* identity functions just return their single argument */
24277- else if (opfunc == _paravirt_ident_32)
24278+ else if (opfunc == (void *)_paravirt_ident_32)
24279 ret = paravirt_patch_ident_32(insnbuf, len);
24280- else if (opfunc == _paravirt_ident_64)
24281+ else if (opfunc == (void *)_paravirt_ident_64)
24282 ret = paravirt_patch_ident_64(insnbuf, len);
24283+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24284+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
24285+ ret = paravirt_patch_ident_64(insnbuf, len);
24286+#endif
24287
24288 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
24289 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
24290@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
24291 if (insn_len > len || start == NULL)
24292 insn_len = len;
24293 else
24294- memcpy(insnbuf, start, insn_len);
24295+ memcpy(insnbuf, ktla_ktva(start), insn_len);
24296
24297 return insn_len;
24298 }
24299@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
24300 return this_cpu_read(paravirt_lazy_mode);
24301 }
24302
24303-struct pv_info pv_info = {
24304+struct pv_info pv_info __read_only = {
24305 .name = "bare hardware",
24306 .paravirt_enabled = 0,
24307 .kernel_rpl = 0,
24308@@ -315,16 +322,16 @@ struct pv_info pv_info = {
24309 #endif
24310 };
24311
24312-struct pv_init_ops pv_init_ops = {
24313+struct pv_init_ops pv_init_ops __read_only = {
24314 .patch = native_patch,
24315 };
24316
24317-struct pv_time_ops pv_time_ops = {
24318+struct pv_time_ops pv_time_ops __read_only = {
24319 .sched_clock = native_sched_clock,
24320 .steal_clock = native_steal_clock,
24321 };
24322
24323-struct pv_irq_ops pv_irq_ops = {
24324+struct pv_irq_ops pv_irq_ops __read_only = {
24325 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
24326 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
24327 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
24328@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
24329 #endif
24330 };
24331
24332-struct pv_cpu_ops pv_cpu_ops = {
24333+struct pv_cpu_ops pv_cpu_ops __read_only = {
24334 .cpuid = native_cpuid,
24335 .get_debugreg = native_get_debugreg,
24336 .set_debugreg = native_set_debugreg,
24337@@ -394,21 +401,26 @@ struct pv_cpu_ops pv_cpu_ops = {
24338 .end_context_switch = paravirt_nop,
24339 };
24340
24341-struct pv_apic_ops pv_apic_ops = {
24342+struct pv_apic_ops pv_apic_ops __read_only= {
24343 #ifdef CONFIG_X86_LOCAL_APIC
24344 .startup_ipi_hook = paravirt_nop,
24345 #endif
24346 };
24347
24348-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
24349+#ifdef CONFIG_X86_32
24350+#ifdef CONFIG_X86_PAE
24351+/* 64-bit pagetable entries */
24352+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
24353+#else
24354 /* 32-bit pagetable entries */
24355 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
24356+#endif
24357 #else
24358 /* 64-bit pagetable entries */
24359 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
24360 #endif
24361
24362-struct pv_mmu_ops pv_mmu_ops = {
24363+struct pv_mmu_ops pv_mmu_ops __read_only = {
24364
24365 .read_cr2 = native_read_cr2,
24366 .write_cr2 = native_write_cr2,
24367@@ -458,6 +470,7 @@ struct pv_mmu_ops pv_mmu_ops = {
24368 .make_pud = PTE_IDENT,
24369
24370 .set_pgd = native_set_pgd,
24371+ .set_pgd_batched = native_set_pgd_batched,
24372 #endif
24373 #endif /* PAGETABLE_LEVELS >= 3 */
24374
24375@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
24376 },
24377
24378 .set_fixmap = native_set_fixmap,
24379+
24380+#ifdef CONFIG_PAX_KERNEXEC
24381+ .pax_open_kernel = native_pax_open_kernel,
24382+ .pax_close_kernel = native_pax_close_kernel,
24383+#endif
24384+
24385 };
24386
24387 EXPORT_SYMBOL_GPL(pv_time_ops);
24388diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
24389index 299d493..2ccb0ee 100644
24390--- a/arch/x86/kernel/pci-calgary_64.c
24391+++ b/arch/x86/kernel/pci-calgary_64.c
24392@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
24393 tce_space = be64_to_cpu(readq(target));
24394 tce_space = tce_space & TAR_SW_BITS;
24395
24396- tce_space = tce_space & (~specified_table_size);
24397+ tce_space = tce_space & (~(unsigned long)specified_table_size);
24398 info->tce_space = (u64 *)__va(tce_space);
24399 }
24400 }
24401diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
24402index 35ccf75..7a15747 100644
24403--- a/arch/x86/kernel/pci-iommu_table.c
24404+++ b/arch/x86/kernel/pci-iommu_table.c
24405@@ -2,7 +2,7 @@
24406 #include <asm/iommu_table.h>
24407 #include <linux/string.h>
24408 #include <linux/kallsyms.h>
24409-
24410+#include <linux/sched.h>
24411
24412 #define DEBUG 1
24413
24414diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
24415index 6c483ba..d10ce2f 100644
24416--- a/arch/x86/kernel/pci-swiotlb.c
24417+++ b/arch/x86/kernel/pci-swiotlb.c
24418@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
24419 void *vaddr, dma_addr_t dma_addr,
24420 struct dma_attrs *attrs)
24421 {
24422- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
24423+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
24424 }
24425
24426 static struct dma_map_ops swiotlb_dma_ops = {
24427diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
24428index 81a5f5e..20f8b58 100644
24429--- a/arch/x86/kernel/process.c
24430+++ b/arch/x86/kernel/process.c
24431@@ -36,7 +36,8 @@
24432 * section. Since TSS's are completely CPU-local, we want them
24433 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
24434 */
24435-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
24436+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
24437+EXPORT_SYMBOL(init_tss);
24438
24439 #ifdef CONFIG_X86_64
24440 static DEFINE_PER_CPU(unsigned char, is_idle);
24441@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
24442 task_xstate_cachep =
24443 kmem_cache_create("task_xstate", xstate_size,
24444 __alignof__(union thread_xstate),
24445- SLAB_PANIC | SLAB_NOTRACK, NULL);
24446+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
24447 }
24448
24449 /*
24450@@ -105,7 +106,7 @@ void exit_thread(void)
24451 unsigned long *bp = t->io_bitmap_ptr;
24452
24453 if (bp) {
24454- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
24455+ struct tss_struct *tss = init_tss + get_cpu();
24456
24457 t->io_bitmap_ptr = NULL;
24458 clear_thread_flag(TIF_IO_BITMAP);
24459@@ -125,6 +126,9 @@ void flush_thread(void)
24460 {
24461 struct task_struct *tsk = current;
24462
24463+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
24464+ loadsegment(gs, 0);
24465+#endif
24466 flush_ptrace_hw_breakpoint(tsk);
24467 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
24468 drop_init_fpu(tsk);
24469@@ -271,7 +275,7 @@ static void __exit_idle(void)
24470 void exit_idle(void)
24471 {
24472 /* idle loop has pid 0 */
24473- if (current->pid)
24474+ if (task_pid_nr(current))
24475 return;
24476 __exit_idle();
24477 }
24478@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
24479 return ret;
24480 }
24481 #endif
24482-void stop_this_cpu(void *dummy)
24483+__noreturn void stop_this_cpu(void *dummy)
24484 {
24485 local_irq_disable();
24486 /*
24487@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
24488 }
24489 early_param("idle", idle_setup);
24490
24491-unsigned long arch_align_stack(unsigned long sp)
24492+#ifdef CONFIG_PAX_RANDKSTACK
24493+void pax_randomize_kstack(struct pt_regs *regs)
24494 {
24495- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
24496- sp -= get_random_int() % 8192;
24497- return sp & ~0xf;
24498-}
24499+ struct thread_struct *thread = &current->thread;
24500+ unsigned long time;
24501
24502-unsigned long arch_randomize_brk(struct mm_struct *mm)
24503-{
24504- unsigned long range_end = mm->brk + 0x02000000;
24505- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
24506-}
24507+ if (!randomize_va_space)
24508+ return;
24509+
24510+ if (v8086_mode(regs))
24511+ return;
24512
24513+ rdtscl(time);
24514+
24515+ /* P4 seems to return a 0 LSB, ignore it */
24516+#ifdef CONFIG_MPENTIUM4
24517+ time &= 0x3EUL;
24518+ time <<= 2;
24519+#elif defined(CONFIG_X86_64)
24520+ time &= 0xFUL;
24521+ time <<= 4;
24522+#else
24523+ time &= 0x1FUL;
24524+ time <<= 3;
24525+#endif
24526+
24527+ thread->sp0 ^= time;
24528+ load_sp0(init_tss + smp_processor_id(), thread);
24529+
24530+#ifdef CONFIG_X86_64
24531+ this_cpu_write(kernel_stack, thread->sp0);
24532+#endif
24533+}
24534+#endif
24535diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
24536index 7305f7d..22f73d6 100644
24537--- a/arch/x86/kernel/process_32.c
24538+++ b/arch/x86/kernel/process_32.c
24539@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
24540 unsigned long thread_saved_pc(struct task_struct *tsk)
24541 {
24542 return ((unsigned long *)tsk->thread.sp)[3];
24543+//XXX return tsk->thread.eip;
24544 }
24545
24546 void __show_regs(struct pt_regs *regs, int all)
24547@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
24548 unsigned long sp;
24549 unsigned short ss, gs;
24550
24551- if (user_mode_vm(regs)) {
24552+ if (user_mode(regs)) {
24553 sp = regs->sp;
24554 ss = regs->ss & 0xffff;
24555- gs = get_user_gs(regs);
24556 } else {
24557 sp = kernel_stack_pointer(regs);
24558 savesegment(ss, ss);
24559- savesegment(gs, gs);
24560 }
24561+ gs = get_user_gs(regs);
24562
24563 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
24564 (u16)regs->cs, regs->ip, regs->flags,
24565- smp_processor_id());
24566+ raw_smp_processor_id());
24567 print_symbol("EIP is at %s\n", regs->ip);
24568
24569 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
24570@@ -128,20 +128,21 @@ void release_thread(struct task_struct *dead_task)
24571 int copy_thread(unsigned long clone_flags, unsigned long sp,
24572 unsigned long arg, struct task_struct *p)
24573 {
24574- struct pt_regs *childregs = task_pt_regs(p);
24575+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
24576 struct task_struct *tsk;
24577 int err;
24578
24579 p->thread.sp = (unsigned long) childregs;
24580 p->thread.sp0 = (unsigned long) (childregs+1);
24581+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
24582
24583 if (unlikely(p->flags & PF_KTHREAD)) {
24584 /* kernel thread */
24585 memset(childregs, 0, sizeof(struct pt_regs));
24586 p->thread.ip = (unsigned long) ret_from_kernel_thread;
24587- task_user_gs(p) = __KERNEL_STACK_CANARY;
24588- childregs->ds = __USER_DS;
24589- childregs->es = __USER_DS;
24590+ savesegment(gs, childregs->gs);
24591+ childregs->ds = __KERNEL_DS;
24592+ childregs->es = __KERNEL_DS;
24593 childregs->fs = __KERNEL_PERCPU;
24594 childregs->bx = sp; /* function */
24595 childregs->bp = arg;
24596@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24597 struct thread_struct *prev = &prev_p->thread,
24598 *next = &next_p->thread;
24599 int cpu = smp_processor_id();
24600- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24601+ struct tss_struct *tss = init_tss + cpu;
24602 fpu_switch_t fpu;
24603
24604 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
24605@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24606 */
24607 lazy_save_gs(prev->gs);
24608
24609+#ifdef CONFIG_PAX_MEMORY_UDEREF
24610+ __set_fs(task_thread_info(next_p)->addr_limit);
24611+#endif
24612+
24613 /*
24614 * Load the per-thread Thread-Local Storage descriptor.
24615 */
24616@@ -302,6 +307,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24617 */
24618 arch_end_context_switch(next_p);
24619
24620+ this_cpu_write(current_task, next_p);
24621+ this_cpu_write(current_tinfo, &next_p->tinfo);
24622+
24623 /*
24624 * Restore %gs if needed (which is common)
24625 */
24626@@ -310,8 +318,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24627
24628 switch_fpu_finish(next_p, fpu);
24629
24630- this_cpu_write(current_task, next_p);
24631-
24632 return prev_p;
24633 }
24634
24635@@ -341,4 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
24636 } while (count++ < 16);
24637 return 0;
24638 }
24639-
24640diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
24641index 355ae06..560fbbe 100644
24642--- a/arch/x86/kernel/process_64.c
24643+++ b/arch/x86/kernel/process_64.c
24644@@ -151,10 +151,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
24645 struct pt_regs *childregs;
24646 struct task_struct *me = current;
24647
24648- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
24649+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
24650 childregs = task_pt_regs(p);
24651 p->thread.sp = (unsigned long) childregs;
24652 p->thread.usersp = me->thread.usersp;
24653+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
24654 set_tsk_thread_flag(p, TIF_FORK);
24655 p->fpu_counter = 0;
24656 p->thread.io_bitmap_ptr = NULL;
24657@@ -165,6 +166,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
24658 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
24659 savesegment(es, p->thread.es);
24660 savesegment(ds, p->thread.ds);
24661+ savesegment(ss, p->thread.ss);
24662+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
24663 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
24664
24665 if (unlikely(p->flags & PF_KTHREAD)) {
24666@@ -273,7 +276,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24667 struct thread_struct *prev = &prev_p->thread;
24668 struct thread_struct *next = &next_p->thread;
24669 int cpu = smp_processor_id();
24670- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24671+ struct tss_struct *tss = init_tss + cpu;
24672 unsigned fsindex, gsindex;
24673 fpu_switch_t fpu;
24674
24675@@ -296,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24676 if (unlikely(next->ds | prev->ds))
24677 loadsegment(ds, next->ds);
24678
24679+ savesegment(ss, prev->ss);
24680+ if (unlikely(next->ss != prev->ss))
24681+ loadsegment(ss, next->ss);
24682
24683 /* We must save %fs and %gs before load_TLS() because
24684 * %fs and %gs may be cleared by load_TLS().
24685@@ -355,10 +361,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24686 prev->usersp = this_cpu_read(old_rsp);
24687 this_cpu_write(old_rsp, next->usersp);
24688 this_cpu_write(current_task, next_p);
24689+ this_cpu_write(current_tinfo, &next_p->tinfo);
24690
24691- this_cpu_write(kernel_stack,
24692- (unsigned long)task_stack_page(next_p) +
24693- THREAD_SIZE - KERNEL_STACK_OFFSET);
24694+ this_cpu_write(kernel_stack, next->sp0);
24695
24696 /*
24697 * Now maybe reload the debug registers and handle I/O bitmaps
24698@@ -427,12 +432,11 @@ unsigned long get_wchan(struct task_struct *p)
24699 if (!p || p == current || p->state == TASK_RUNNING)
24700 return 0;
24701 stack = (unsigned long)task_stack_page(p);
24702- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
24703+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
24704 return 0;
24705 fp = *(u64 *)(p->thread.sp);
24706 do {
24707- if (fp < (unsigned long)stack ||
24708- fp >= (unsigned long)stack+THREAD_SIZE)
24709+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
24710 return 0;
24711 ip = *(u64 *)(fp+8);
24712 if (!in_sched_functions(ip))
24713diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
24714index 29a8120..a50b5ee 100644
24715--- a/arch/x86/kernel/ptrace.c
24716+++ b/arch/x86/kernel/ptrace.c
24717@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
24718 {
24719 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
24720 unsigned long sp = (unsigned long)&regs->sp;
24721- struct thread_info *tinfo;
24722
24723- if (context == (sp & ~(THREAD_SIZE - 1)))
24724+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
24725 return sp;
24726
24727- tinfo = (struct thread_info *)context;
24728- if (tinfo->previous_esp)
24729- return tinfo->previous_esp;
24730+ sp = *(unsigned long *)context;
24731+ if (sp)
24732+ return sp;
24733
24734 return (unsigned long)regs;
24735 }
24736@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
24737 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
24738 {
24739 int i;
24740- int dr7 = 0;
24741+ unsigned long dr7 = 0;
24742 struct arch_hw_breakpoint *info;
24743
24744 for (i = 0; i < HBP_NUM; i++) {
24745@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
24746 unsigned long addr, unsigned long data)
24747 {
24748 int ret;
24749- unsigned long __user *datap = (unsigned long __user *)data;
24750+ unsigned long __user *datap = (__force unsigned long __user *)data;
24751
24752 switch (request) {
24753 /* read the word at location addr in the USER area. */
24754@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
24755 if ((int) addr < 0)
24756 return -EIO;
24757 ret = do_get_thread_area(child, addr,
24758- (struct user_desc __user *)data);
24759+ (__force struct user_desc __user *) data);
24760 break;
24761
24762 case PTRACE_SET_THREAD_AREA:
24763 if ((int) addr < 0)
24764 return -EIO;
24765 ret = do_set_thread_area(child, addr,
24766- (struct user_desc __user *)data, 0);
24767+ (__force struct user_desc __user *) data, 0);
24768 break;
24769 #endif
24770
24771@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
24772
24773 #ifdef CONFIG_X86_64
24774
24775-static struct user_regset x86_64_regsets[] __read_mostly = {
24776+static user_regset_no_const x86_64_regsets[] __read_only = {
24777 [REGSET_GENERAL] = {
24778 .core_note_type = NT_PRSTATUS,
24779 .n = sizeof(struct user_regs_struct) / sizeof(long),
24780@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
24781 #endif /* CONFIG_X86_64 */
24782
24783 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
24784-static struct user_regset x86_32_regsets[] __read_mostly = {
24785+static user_regset_no_const x86_32_regsets[] __read_only = {
24786 [REGSET_GENERAL] = {
24787 .core_note_type = NT_PRSTATUS,
24788 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
24789@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
24790 */
24791 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
24792
24793-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24794+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24795 {
24796 #ifdef CONFIG_X86_64
24797 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
24798@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
24799 memset(info, 0, sizeof(*info));
24800 info->si_signo = SIGTRAP;
24801 info->si_code = si_code;
24802- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
24803+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
24804 }
24805
24806 void user_single_step_siginfo(struct task_struct *tsk,
24807@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
24808 # define IS_IA32 0
24809 #endif
24810
24811+#ifdef CONFIG_GRKERNSEC_SETXID
24812+extern void gr_delayed_cred_worker(void);
24813+#endif
24814+
24815 /*
24816 * We must return the syscall number to actually look up in the table.
24817 * This can be -1L to skip running any syscall at all.
24818@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
24819
24820 user_exit();
24821
24822+#ifdef CONFIG_GRKERNSEC_SETXID
24823+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24824+ gr_delayed_cred_worker();
24825+#endif
24826+
24827 /*
24828 * If we stepped into a sysenter/syscall insn, it trapped in
24829 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
24830@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
24831 */
24832 user_exit();
24833
24834+#ifdef CONFIG_GRKERNSEC_SETXID
24835+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24836+ gr_delayed_cred_worker();
24837+#endif
24838+
24839 audit_syscall_exit(regs);
24840
24841 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
24842diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
24843index 2cb9470..ff1fd80 100644
24844--- a/arch/x86/kernel/pvclock.c
24845+++ b/arch/x86/kernel/pvclock.c
24846@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
24847 return pv_tsc_khz;
24848 }
24849
24850-static atomic64_t last_value = ATOMIC64_INIT(0);
24851+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
24852
24853 void pvclock_resume(void)
24854 {
24855- atomic64_set(&last_value, 0);
24856+ atomic64_set_unchecked(&last_value, 0);
24857 }
24858
24859 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
24860@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
24861 * updating at the same time, and one of them could be slightly behind,
24862 * making the assumption that last_value always go forward fail to hold.
24863 */
24864- last = atomic64_read(&last_value);
24865+ last = atomic64_read_unchecked(&last_value);
24866 do {
24867 if (ret < last)
24868 return last;
24869- last = atomic64_cmpxchg(&last_value, last, ret);
24870+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
24871 } while (unlikely(last != ret));
24872
24873 return ret;
24874diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
24875index 76fa1e9..abf09ea 100644
24876--- a/arch/x86/kernel/reboot.c
24877+++ b/arch/x86/kernel/reboot.c
24878@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
24879 EXPORT_SYMBOL(pm_power_off);
24880
24881 static const struct desc_ptr no_idt = {};
24882-static int reboot_mode;
24883+static unsigned short reboot_mode;
24884 enum reboot_type reboot_type = BOOT_ACPI;
24885 int reboot_force;
24886
24887@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
24888
24889 void __noreturn machine_real_restart(unsigned int type)
24890 {
24891+
24892+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
24893+ struct desc_struct *gdt;
24894+#endif
24895+
24896 local_irq_disable();
24897
24898 /*
24899@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
24900
24901 /* Jump to the identity-mapped low memory code */
24902 #ifdef CONFIG_X86_32
24903- asm volatile("jmpl *%0" : :
24904+
24905+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
24906+ gdt = get_cpu_gdt_table(smp_processor_id());
24907+ pax_open_kernel();
24908+#ifdef CONFIG_PAX_MEMORY_UDEREF
24909+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
24910+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
24911+ loadsegment(ds, __KERNEL_DS);
24912+ loadsegment(es, __KERNEL_DS);
24913+ loadsegment(ss, __KERNEL_DS);
24914+#endif
24915+#ifdef CONFIG_PAX_KERNEXEC
24916+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
24917+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
24918+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
24919+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
24920+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
24921+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
24922+#endif
24923+ pax_close_kernel();
24924+#endif
24925+
24926+ asm volatile("ljmpl *%0" : :
24927 "rm" (real_mode_header->machine_real_restart_asm),
24928 "a" (type));
24929 #else
24930@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
24931 * try to force a triple fault and then cycle between hitting the keyboard
24932 * controller and doing that
24933 */
24934-static void native_machine_emergency_restart(void)
24935+static void __noreturn native_machine_emergency_restart(void)
24936 {
24937 int i;
24938 int attempt = 0;
24939@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
24940 #endif
24941 }
24942
24943-static void __machine_emergency_restart(int emergency)
24944+static void __noreturn __machine_emergency_restart(int emergency)
24945 {
24946 reboot_emergency = emergency;
24947 machine_ops.emergency_restart();
24948 }
24949
24950-static void native_machine_restart(char *__unused)
24951+static void __noreturn native_machine_restart(char *__unused)
24952 {
24953 pr_notice("machine restart\n");
24954
24955@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
24956 __machine_emergency_restart(0);
24957 }
24958
24959-static void native_machine_halt(void)
24960+static void __noreturn native_machine_halt(void)
24961 {
24962 /* Stop other cpus and apics */
24963 machine_shutdown();
24964@@ -679,7 +706,7 @@ static void native_machine_halt(void)
24965 stop_this_cpu(NULL);
24966 }
24967
24968-static void native_machine_power_off(void)
24969+static void __noreturn native_machine_power_off(void)
24970 {
24971 if (pm_power_off) {
24972 if (!reboot_force)
24973@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
24974 }
24975 /* A fallback in case there is no PM info available */
24976 tboot_shutdown(TB_SHUTDOWN_HALT);
24977+ unreachable();
24978 }
24979
24980-struct machine_ops machine_ops = {
24981+struct machine_ops machine_ops __read_only = {
24982 .power_off = native_machine_power_off,
24983 .shutdown = native_machine_shutdown,
24984 .emergency_restart = native_machine_emergency_restart,
24985diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
24986index c8e41e9..64049ef 100644
24987--- a/arch/x86/kernel/reboot_fixups_32.c
24988+++ b/arch/x86/kernel/reboot_fixups_32.c
24989@@ -57,7 +57,7 @@ struct device_fixup {
24990 unsigned int vendor;
24991 unsigned int device;
24992 void (*reboot_fixup)(struct pci_dev *);
24993-};
24994+} __do_const;
24995
24996 /*
24997 * PCI ids solely used for fixups_table go here
24998diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
24999index f2bb9c9..bed145d7 100644
25000--- a/arch/x86/kernel/relocate_kernel_64.S
25001+++ b/arch/x86/kernel/relocate_kernel_64.S
25002@@ -11,6 +11,7 @@
25003 #include <asm/kexec.h>
25004 #include <asm/processor-flags.h>
25005 #include <asm/pgtable_types.h>
25006+#include <asm/alternative-asm.h>
25007
25008 /*
25009 * Must be relocatable PIC code callable as a C function
25010@@ -167,6 +168,7 @@ identity_mapped:
25011 xorq %r14, %r14
25012 xorq %r15, %r15
25013
25014+ pax_force_retaddr 0, 1
25015 ret
25016
25017 1:
25018diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
25019index 56f7fcf..2cfe4f1 100644
25020--- a/arch/x86/kernel/setup.c
25021+++ b/arch/x86/kernel/setup.c
25022@@ -110,6 +110,7 @@
25023 #include <asm/mce.h>
25024 #include <asm/alternative.h>
25025 #include <asm/prom.h>
25026+#include <asm/boot.h>
25027
25028 /*
25029 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
25030@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
25031 #endif
25032
25033
25034-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
25035-unsigned long mmu_cr4_features;
25036+#ifdef CONFIG_X86_64
25037+unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
25038+#elif defined(CONFIG_X86_PAE)
25039+unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
25040 #else
25041-unsigned long mmu_cr4_features = X86_CR4_PAE;
25042+unsigned long mmu_cr4_features __read_only;
25043 #endif
25044
25045+void set_in_cr4(unsigned long mask)
25046+{
25047+ unsigned long cr4 = read_cr4();
25048+
25049+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
25050+ return;
25051+
25052+ pax_open_kernel();
25053+ mmu_cr4_features |= mask;
25054+ pax_close_kernel();
25055+
25056+ if (trampoline_cr4_features)
25057+ *trampoline_cr4_features = mmu_cr4_features;
25058+ cr4 |= mask;
25059+ write_cr4(cr4);
25060+}
25061+EXPORT_SYMBOL(set_in_cr4);
25062+
25063+void clear_in_cr4(unsigned long mask)
25064+{
25065+ unsigned long cr4 = read_cr4();
25066+
25067+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
25068+ return;
25069+
25070+ pax_open_kernel();
25071+ mmu_cr4_features &= ~mask;
25072+ pax_close_kernel();
25073+
25074+ if (trampoline_cr4_features)
25075+ *trampoline_cr4_features = mmu_cr4_features;
25076+ cr4 &= ~mask;
25077+ write_cr4(cr4);
25078+}
25079+EXPORT_SYMBOL(clear_in_cr4);
25080+
25081 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
25082 int bootloader_type, bootloader_version;
25083
25084@@ -444,7 +483,7 @@ static void __init parse_setup_data(void)
25085
25086 switch (data->type) {
25087 case SETUP_E820_EXT:
25088- parse_e820_ext(data);
25089+ parse_e820_ext((struct setup_data __force_kernel *)data);
25090 break;
25091 case SETUP_DTB:
25092 add_dtb(pa_data);
25093@@ -771,7 +810,7 @@ static void __init trim_bios_range(void)
25094 * area (640->1Mb) as ram even though it is not.
25095 * take them out.
25096 */
25097- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
25098+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
25099
25100 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
25101 }
25102@@ -779,7 +818,7 @@ static void __init trim_bios_range(void)
25103 /* called before trim_bios_range() to spare extra sanitize */
25104 static void __init e820_add_kernel_range(void)
25105 {
25106- u64 start = __pa_symbol(_text);
25107+ u64 start = __pa_symbol(ktla_ktva(_text));
25108 u64 size = __pa_symbol(_end) - start;
25109
25110 /*
25111@@ -841,8 +880,12 @@ static void __init trim_low_memory_range(void)
25112
25113 void __init setup_arch(char **cmdline_p)
25114 {
25115+#ifdef CONFIG_X86_32
25116+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
25117+#else
25118 memblock_reserve(__pa_symbol(_text),
25119 (unsigned long)__bss_stop - (unsigned long)_text);
25120+#endif
25121
25122 early_reserve_initrd();
25123
25124@@ -934,14 +977,14 @@ void __init setup_arch(char **cmdline_p)
25125
25126 if (!boot_params.hdr.root_flags)
25127 root_mountflags &= ~MS_RDONLY;
25128- init_mm.start_code = (unsigned long) _text;
25129- init_mm.end_code = (unsigned long) _etext;
25130+ init_mm.start_code = ktla_ktva((unsigned long) _text);
25131+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
25132 init_mm.end_data = (unsigned long) _edata;
25133 init_mm.brk = _brk_end;
25134
25135- code_resource.start = __pa_symbol(_text);
25136- code_resource.end = __pa_symbol(_etext)-1;
25137- data_resource.start = __pa_symbol(_etext);
25138+ code_resource.start = __pa_symbol(ktla_ktva(_text));
25139+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
25140+ data_resource.start = __pa_symbol(_sdata);
25141 data_resource.end = __pa_symbol(_edata)-1;
25142 bss_resource.start = __pa_symbol(__bss_start);
25143 bss_resource.end = __pa_symbol(__bss_stop)-1;
25144diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
25145index 5cdff03..80fa283 100644
25146--- a/arch/x86/kernel/setup_percpu.c
25147+++ b/arch/x86/kernel/setup_percpu.c
25148@@ -21,19 +21,17 @@
25149 #include <asm/cpu.h>
25150 #include <asm/stackprotector.h>
25151
25152-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
25153+#ifdef CONFIG_SMP
25154+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
25155 EXPORT_PER_CPU_SYMBOL(cpu_number);
25156+#endif
25157
25158-#ifdef CONFIG_X86_64
25159 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
25160-#else
25161-#define BOOT_PERCPU_OFFSET 0
25162-#endif
25163
25164 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
25165 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
25166
25167-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
25168+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
25169 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
25170 };
25171 EXPORT_SYMBOL(__per_cpu_offset);
25172@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
25173 {
25174 #ifdef CONFIG_NEED_MULTIPLE_NODES
25175 pg_data_t *last = NULL;
25176- unsigned int cpu;
25177+ int cpu;
25178
25179 for_each_possible_cpu(cpu) {
25180 int node = early_cpu_to_node(cpu);
25181@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
25182 {
25183 #ifdef CONFIG_X86_32
25184 struct desc_struct gdt;
25185+ unsigned long base = per_cpu_offset(cpu);
25186
25187- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
25188- 0x2 | DESCTYPE_S, 0x8);
25189- gdt.s = 1;
25190+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
25191+ 0x83 | DESCTYPE_S, 0xC);
25192 write_gdt_entry(get_cpu_gdt_table(cpu),
25193 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
25194 #endif
25195@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
25196 /* alrighty, percpu areas up and running */
25197 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
25198 for_each_possible_cpu(cpu) {
25199+#ifdef CONFIG_CC_STACKPROTECTOR
25200+#ifdef CONFIG_X86_32
25201+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
25202+#endif
25203+#endif
25204 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
25205 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
25206 per_cpu(cpu_number, cpu) = cpu;
25207@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
25208 */
25209 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
25210 #endif
25211+#ifdef CONFIG_CC_STACKPROTECTOR
25212+#ifdef CONFIG_X86_32
25213+ if (!cpu)
25214+ per_cpu(stack_canary.canary, cpu) = canary;
25215+#endif
25216+#endif
25217 /*
25218 * Up to this point, the boot CPU has been using .init.data
25219 * area. Reload any changed state for the boot CPU.
25220diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
25221index 6956299..18126ec4 100644
25222--- a/arch/x86/kernel/signal.c
25223+++ b/arch/x86/kernel/signal.c
25224@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
25225 * Align the stack pointer according to the i386 ABI,
25226 * i.e. so that on function entry ((sp + 4) & 15) == 0.
25227 */
25228- sp = ((sp + 4) & -16ul) - 4;
25229+ sp = ((sp - 12) & -16ul) - 4;
25230 #else /* !CONFIG_X86_32 */
25231 sp = round_down(sp, 16) - 8;
25232 #endif
25233@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
25234 }
25235
25236 if (current->mm->context.vdso)
25237- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
25238+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
25239 else
25240- restorer = &frame->retcode;
25241+ restorer = (void __user *)&frame->retcode;
25242 if (ksig->ka.sa.sa_flags & SA_RESTORER)
25243 restorer = ksig->ka.sa.sa_restorer;
25244
25245@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
25246 * reasons and because gdb uses it as a signature to notice
25247 * signal handler stack frames.
25248 */
25249- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
25250+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
25251
25252 if (err)
25253 return -EFAULT;
25254@@ -364,10 +364,13 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25255 else
25256 put_user_ex(0, &frame->uc.uc_flags);
25257 put_user_ex(0, &frame->uc.uc_link);
25258- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
25259+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
25260
25261 /* Set up to return from userspace. */
25262- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
25263+ if (current->mm->context.vdso)
25264+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
25265+ else
25266+ restorer = (void __user *)&frame->retcode;
25267 if (ksig->ka.sa.sa_flags & SA_RESTORER)
25268 restorer = ksig->ka.sa.sa_restorer;
25269 put_user_ex(restorer, &frame->pretcode);
25270@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25271 * reasons and because gdb uses it as a signature to notice
25272 * signal handler stack frames.
25273 */
25274- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
25275+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
25276 } put_user_catch(err);
25277
25278 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
25279@@ -429,7 +432,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25280 else
25281 put_user_ex(0, &frame->uc.uc_flags);
25282 put_user_ex(0, &frame->uc.uc_link);
25283- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
25284+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
25285
25286 /* Set up to return from userspace. If provided, use a stub
25287 already in userspace. */
25288@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
25289 {
25290 int usig = signr_convert(ksig->sig);
25291 sigset_t *set = sigmask_to_save();
25292- compat_sigset_t *cset = (compat_sigset_t *) set;
25293+ sigset_t sigcopy;
25294+ compat_sigset_t *cset;
25295+
25296+ sigcopy = *set;
25297+
25298+ cset = (compat_sigset_t *) &sigcopy;
25299
25300 /* Set up the stack frame */
25301 if (is_ia32_frame()) {
25302@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
25303 } else if (is_x32_frame()) {
25304 return x32_setup_rt_frame(ksig, cset, regs);
25305 } else {
25306- return __setup_rt_frame(ksig->sig, ksig, set, regs);
25307+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
25308 }
25309 }
25310
25311diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
25312index 48d2b7d..90d328a 100644
25313--- a/arch/x86/kernel/smp.c
25314+++ b/arch/x86/kernel/smp.c
25315@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
25316
25317 __setup("nonmi_ipi", nonmi_ipi_setup);
25318
25319-struct smp_ops smp_ops = {
25320+struct smp_ops smp_ops __read_only = {
25321 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
25322 .smp_prepare_cpus = native_smp_prepare_cpus,
25323 .smp_cpus_done = native_smp_cpus_done,
25324diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
25325index bfd348e..914f323 100644
25326--- a/arch/x86/kernel/smpboot.c
25327+++ b/arch/x86/kernel/smpboot.c
25328@@ -251,14 +251,18 @@ notrace static void __cpuinit start_secondary(void *unused)
25329
25330 enable_start_cpu0 = 0;
25331
25332-#ifdef CONFIG_X86_32
25333- /* switch away from the initial page table */
25334- load_cr3(swapper_pg_dir);
25335- __flush_tlb_all();
25336-#endif
25337-
25338 /* otherwise gcc will move up smp_processor_id before the cpu_init */
25339 barrier();
25340+
25341+ /* switch away from the initial page table */
25342+#ifdef CONFIG_PAX_PER_CPU_PGD
25343+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
25344+ __flush_tlb_all();
25345+#elif defined(CONFIG_X86_32)
25346+ load_cr3(swapper_pg_dir);
25347+ __flush_tlb_all();
25348+#endif
25349+
25350 /*
25351 * Check TSC synchronization with the BP:
25352 */
25353@@ -748,6 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
25354 idle->thread.sp = (unsigned long) (((struct pt_regs *)
25355 (THREAD_SIZE + task_stack_page(idle))) - 1);
25356 per_cpu(current_task, cpu) = idle;
25357+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
25358
25359 #ifdef CONFIG_X86_32
25360 /* Stack for startup_32 can be just as for start_secondary onwards */
25361@@ -755,11 +760,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
25362 #else
25363 clear_tsk_thread_flag(idle, TIF_FORK);
25364 initial_gs = per_cpu_offset(cpu);
25365- per_cpu(kernel_stack, cpu) =
25366- (unsigned long)task_stack_page(idle) -
25367- KERNEL_STACK_OFFSET + THREAD_SIZE;
25368+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
25369 #endif
25370+
25371+ pax_open_kernel();
25372 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
25373+ pax_close_kernel();
25374+
25375 initial_code = (unsigned long)start_secondary;
25376 stack_start = idle->thread.sp;
25377
25378@@ -908,6 +915,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
25379 /* the FPU context is blank, nobody can own it */
25380 __cpu_disable_lazy_restore(cpu);
25381
25382+#ifdef CONFIG_PAX_PER_CPU_PGD
25383+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
25384+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25385+ KERNEL_PGD_PTRS);
25386+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
25387+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25388+ KERNEL_PGD_PTRS);
25389+#endif
25390+
25391 err = do_boot_cpu(apicid, cpu, tidle);
25392 if (err) {
25393 pr_debug("do_boot_cpu failed %d\n", err);
25394diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
25395index 9b4d51d..5d28b58 100644
25396--- a/arch/x86/kernel/step.c
25397+++ b/arch/x86/kernel/step.c
25398@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
25399 struct desc_struct *desc;
25400 unsigned long base;
25401
25402- seg &= ~7UL;
25403+ seg >>= 3;
25404
25405 mutex_lock(&child->mm->context.lock);
25406- if (unlikely((seg >> 3) >= child->mm->context.size))
25407+ if (unlikely(seg >= child->mm->context.size))
25408 addr = -1L; /* bogus selector, access would fault */
25409 else {
25410 desc = child->mm->context.ldt + seg;
25411@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
25412 addr += base;
25413 }
25414 mutex_unlock(&child->mm->context.lock);
25415- }
25416+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
25417+ addr = ktla_ktva(addr);
25418
25419 return addr;
25420 }
25421@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
25422 unsigned char opcode[15];
25423 unsigned long addr = convert_ip_to_linear(child, regs);
25424
25425+ if (addr == -EINVAL)
25426+ return 0;
25427+
25428 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
25429 for (i = 0; i < copied; i++) {
25430 switch (opcode[i]) {
25431diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
25432new file mode 100644
25433index 0000000..5877189
25434--- /dev/null
25435+++ b/arch/x86/kernel/sys_i386_32.c
25436@@ -0,0 +1,189 @@
25437+/*
25438+ * This file contains various random system calls that
25439+ * have a non-standard calling sequence on the Linux/i386
25440+ * platform.
25441+ */
25442+
25443+#include <linux/errno.h>
25444+#include <linux/sched.h>
25445+#include <linux/mm.h>
25446+#include <linux/fs.h>
25447+#include <linux/smp.h>
25448+#include <linux/sem.h>
25449+#include <linux/msg.h>
25450+#include <linux/shm.h>
25451+#include <linux/stat.h>
25452+#include <linux/syscalls.h>
25453+#include <linux/mman.h>
25454+#include <linux/file.h>
25455+#include <linux/utsname.h>
25456+#include <linux/ipc.h>
25457+#include <linux/elf.h>
25458+
25459+#include <linux/uaccess.h>
25460+#include <linux/unistd.h>
25461+
25462+#include <asm/syscalls.h>
25463+
25464+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
25465+{
25466+ unsigned long pax_task_size = TASK_SIZE;
25467+
25468+#ifdef CONFIG_PAX_SEGMEXEC
25469+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
25470+ pax_task_size = SEGMEXEC_TASK_SIZE;
25471+#endif
25472+
25473+ if (flags & MAP_FIXED)
25474+ if (len > pax_task_size || addr > pax_task_size - len)
25475+ return -EINVAL;
25476+
25477+ return 0;
25478+}
25479+
25480+/*
25481+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
25482+ */
25483+static unsigned long get_align_mask(void)
25484+{
25485+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
25486+ return 0;
25487+
25488+ if (!(current->flags & PF_RANDOMIZE))
25489+ return 0;
25490+
25491+ return va_align.mask;
25492+}
25493+
25494+unsigned long
25495+arch_get_unmapped_area(struct file *filp, unsigned long addr,
25496+ unsigned long len, unsigned long pgoff, unsigned long flags)
25497+{
25498+ struct mm_struct *mm = current->mm;
25499+ struct vm_area_struct *vma;
25500+ unsigned long pax_task_size = TASK_SIZE;
25501+ struct vm_unmapped_area_info info;
25502+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25503+
25504+#ifdef CONFIG_PAX_SEGMEXEC
25505+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25506+ pax_task_size = SEGMEXEC_TASK_SIZE;
25507+#endif
25508+
25509+ pax_task_size -= PAGE_SIZE;
25510+
25511+ if (len > pax_task_size)
25512+ return -ENOMEM;
25513+
25514+ if (flags & MAP_FIXED)
25515+ return addr;
25516+
25517+#ifdef CONFIG_PAX_RANDMMAP
25518+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25519+#endif
25520+
25521+ if (addr) {
25522+ addr = PAGE_ALIGN(addr);
25523+ if (pax_task_size - len >= addr) {
25524+ vma = find_vma(mm, addr);
25525+ if (check_heap_stack_gap(vma, addr, len, offset))
25526+ return addr;
25527+ }
25528+ }
25529+
25530+ info.flags = 0;
25531+ info.length = len;
25532+ info.align_mask = filp ? get_align_mask() : 0;
25533+ info.align_offset = pgoff << PAGE_SHIFT;
25534+ info.threadstack_offset = offset;
25535+
25536+#ifdef CONFIG_PAX_PAGEEXEC
25537+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
25538+ info.low_limit = 0x00110000UL;
25539+ info.high_limit = mm->start_code;
25540+
25541+#ifdef CONFIG_PAX_RANDMMAP
25542+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25543+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
25544+#endif
25545+
25546+ if (info.low_limit < info.high_limit) {
25547+ addr = vm_unmapped_area(&info);
25548+ if (!IS_ERR_VALUE(addr))
25549+ return addr;
25550+ }
25551+ } else
25552+#endif
25553+
25554+ info.low_limit = mm->mmap_base;
25555+ info.high_limit = pax_task_size;
25556+
25557+ return vm_unmapped_area(&info);
25558+}
25559+
25560+unsigned long
25561+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25562+ const unsigned long len, const unsigned long pgoff,
25563+ const unsigned long flags)
25564+{
25565+ struct vm_area_struct *vma;
25566+ struct mm_struct *mm = current->mm;
25567+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
25568+ struct vm_unmapped_area_info info;
25569+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25570+
25571+#ifdef CONFIG_PAX_SEGMEXEC
25572+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25573+ pax_task_size = SEGMEXEC_TASK_SIZE;
25574+#endif
25575+
25576+ pax_task_size -= PAGE_SIZE;
25577+
25578+ /* requested length too big for entire address space */
25579+ if (len > pax_task_size)
25580+ return -ENOMEM;
25581+
25582+ if (flags & MAP_FIXED)
25583+ return addr;
25584+
25585+#ifdef CONFIG_PAX_PAGEEXEC
25586+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
25587+ goto bottomup;
25588+#endif
25589+
25590+#ifdef CONFIG_PAX_RANDMMAP
25591+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25592+#endif
25593+
25594+ /* requesting a specific address */
25595+ if (addr) {
25596+ addr = PAGE_ALIGN(addr);
25597+ if (pax_task_size - len >= addr) {
25598+ vma = find_vma(mm, addr);
25599+ if (check_heap_stack_gap(vma, addr, len, offset))
25600+ return addr;
25601+ }
25602+ }
25603+
25604+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
25605+ info.length = len;
25606+ info.low_limit = PAGE_SIZE;
25607+ info.high_limit = mm->mmap_base;
25608+ info.align_mask = filp ? get_align_mask() : 0;
25609+ info.align_offset = pgoff << PAGE_SHIFT;
25610+ info.threadstack_offset = offset;
25611+
25612+ addr = vm_unmapped_area(&info);
25613+ if (!(addr & ~PAGE_MASK))
25614+ return addr;
25615+ VM_BUG_ON(addr != -ENOMEM);
25616+
25617+bottomup:
25618+ /*
25619+ * A failed mmap() very likely causes application failure,
25620+ * so fall back to the bottom-up function here. This scenario
25621+ * can happen with large stack limits and large mmap()
25622+ * allocations.
25623+ */
25624+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
25625+}
25626diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
e2b79cd1 25627index 30277e2..5664a29 100644
bb5f0bf8
AF
25628--- a/arch/x86/kernel/sys_x86_64.c
25629+++ b/arch/x86/kernel/sys_x86_64.c
25630@@ -81,8 +81,8 @@ out:
25631 return error;
25632 }
25633
25634-static void find_start_end(unsigned long flags, unsigned long *begin,
25635- unsigned long *end)
25636+static void find_start_end(struct mm_struct *mm, unsigned long flags,
25637+ unsigned long *begin, unsigned long *end)
25638 {
25639 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
25640 unsigned long new_begin;
25641@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
25642 *begin = new_begin;
25643 }
25644 } else {
e2b79cd1
AF
25645- *begin = current->mm->mmap_legacy_base;
25646+ *begin = mm->mmap_legacy_base;
bb5f0bf8
AF
25647 *end = TASK_SIZE;
25648 }
25649 }
25650@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
25651 struct vm_area_struct *vma;
25652 struct vm_unmapped_area_info info;
25653 unsigned long begin, end;
25654+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25655
25656 if (flags & MAP_FIXED)
25657 return addr;
25658
25659- find_start_end(flags, &begin, &end);
25660+ find_start_end(mm, flags, &begin, &end);
25661
25662 if (len > end)
25663 return -ENOMEM;
25664
25665+#ifdef CONFIG_PAX_RANDMMAP
25666+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25667+#endif
25668+
25669 if (addr) {
25670 addr = PAGE_ALIGN(addr);
25671 vma = find_vma(mm, addr);
25672- if (end - len >= addr &&
25673- (!vma || addr + len <= vma->vm_start))
25674+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
25675 return addr;
25676 }
25677
25678@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
25679 info.high_limit = end;
25680 info.align_mask = filp ? get_align_mask() : 0;
25681 info.align_offset = pgoff << PAGE_SHIFT;
25682+ info.threadstack_offset = offset;
25683 return vm_unmapped_area(&info);
25684 }
25685
25686@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25687 struct mm_struct *mm = current->mm;
25688 unsigned long addr = addr0;
25689 struct vm_unmapped_area_info info;
25690+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25691
25692 /* requested length too big for entire address space */
25693 if (len > TASK_SIZE)
25694@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25695 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
25696 goto bottomup;
25697
25698+#ifdef CONFIG_PAX_RANDMMAP
25699+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25700+#endif
25701+
25702 /* requesting a specific address */
25703 if (addr) {
25704 addr = PAGE_ALIGN(addr);
25705 vma = find_vma(mm, addr);
25706- if (TASK_SIZE - len >= addr &&
25707- (!vma || addr + len <= vma->vm_start))
25708+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
25709 return addr;
25710 }
25711
25712@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25713 info.high_limit = mm->mmap_base;
25714 info.align_mask = filp ? get_align_mask() : 0;
25715 info.align_offset = pgoff << PAGE_SHIFT;
25716+ info.threadstack_offset = offset;
25717 addr = vm_unmapped_area(&info);
25718 if (!(addr & ~PAGE_MASK))
25719 return addr;
25720diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
25721index f84fe00..f41d9f1 100644
25722--- a/arch/x86/kernel/tboot.c
25723+++ b/arch/x86/kernel/tboot.c
25724@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
25725
25726 void tboot_shutdown(u32 shutdown_type)
25727 {
25728- void (*shutdown)(void);
25729+ void (* __noreturn shutdown)(void);
25730
25731 if (!tboot_enabled())
25732 return;
25733@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
25734
25735 switch_to_tboot_pt();
25736
25737- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
25738+ shutdown = (void *)tboot->shutdown_entry;
25739 shutdown();
25740
25741 /* should not reach here */
25742@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
25743 return 0;
25744 }
25745
25746-static atomic_t ap_wfs_count;
25747+static atomic_unchecked_t ap_wfs_count;
25748
25749 static int tboot_wait_for_aps(int num_aps)
25750 {
25751@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
25752 {
25753 switch (action) {
25754 case CPU_DYING:
25755- atomic_inc(&ap_wfs_count);
25756+ atomic_inc_unchecked(&ap_wfs_count);
25757 if (num_online_cpus() == 1)
25758- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
25759+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
25760 return NOTIFY_BAD;
25761 break;
25762 }
25763 return NOTIFY_OK;
25764 }
25765
25766-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
25767+static struct notifier_block tboot_cpu_notifier =
25768 {
25769 .notifier_call = tboot_cpu_callback,
25770 };
25771@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
25772
25773 tboot_create_trampoline();
25774
25775- atomic_set(&ap_wfs_count, 0);
25776+ atomic_set_unchecked(&ap_wfs_count, 0);
25777 register_hotcpu_notifier(&tboot_cpu_notifier);
25778
25779 acpi_os_set_prepare_sleep(&tboot_sleep);
25780diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
25781index 24d3c91..d06b473 100644
25782--- a/arch/x86/kernel/time.c
25783+++ b/arch/x86/kernel/time.c
25784@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
25785 {
25786 unsigned long pc = instruction_pointer(regs);
25787
25788- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
25789+ if (!user_mode(regs) && in_lock_functions(pc)) {
25790 #ifdef CONFIG_FRAME_POINTER
25791- return *(unsigned long *)(regs->bp + sizeof(long));
25792+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
25793 #else
25794 unsigned long *sp =
25795 (unsigned long *)kernel_stack_pointer(regs);
25796@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
25797 * or above a saved flags. Eflags has bits 22-31 zero,
25798 * kernel addresses don't.
25799 */
25800+
25801+#ifdef CONFIG_PAX_KERNEXEC
25802+ return ktla_ktva(sp[0]);
25803+#else
25804 if (sp[0] >> 22)
25805 return sp[0];
25806 if (sp[1] >> 22)
25807 return sp[1];
25808 #endif
25809+
25810+#endif
25811 }
25812 return pc;
25813 }
25814diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
25815index f7fec09..9991981 100644
25816--- a/arch/x86/kernel/tls.c
25817+++ b/arch/x86/kernel/tls.c
25818@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
25819 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
25820 return -EINVAL;
25821
25822+#ifdef CONFIG_PAX_SEGMEXEC
25823+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
25824+ return -EINVAL;
25825+#endif
25826+
25827 set_tls_desc(p, idx, &info, 1);
25828
25829 return 0;
25830@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
25831
25832 if (kbuf)
25833 info = kbuf;
25834- else if (__copy_from_user(infobuf, ubuf, count))
25835+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
25836 return -EFAULT;
25837 else
25838 info = infobuf;
25839diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
25840index 772e2a8..bad5bf6 100644
25841--- a/arch/x86/kernel/traps.c
25842+++ b/arch/x86/kernel/traps.c
25843@@ -68,12 +68,6 @@
25844 #include <asm/setup.h>
25845
25846 asmlinkage int system_call(void);
25847-
25848-/*
25849- * The IDT has to be page-aligned to simplify the Pentium
25850- * F0 0F bug workaround.
25851- */
25852-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
25853 #endif
25854
25855 DECLARE_BITMAP(used_vectors, NR_VECTORS);
25856@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
25857 }
25858
25859 static int __kprobes
25860-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25861+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
25862 struct pt_regs *regs, long error_code)
25863 {
25864 #ifdef CONFIG_X86_32
25865- if (regs->flags & X86_VM_MASK) {
25866+ if (v8086_mode(regs)) {
25867 /*
25868 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
25869 * On nmi (interrupt 2), do_trap should not be called.
25870@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25871 return -1;
25872 }
25873 #endif
25874- if (!user_mode(regs)) {
25875+ if (!user_mode_novm(regs)) {
25876 if (!fixup_exception(regs)) {
25877 tsk->thread.error_code = error_code;
25878 tsk->thread.trap_nr = trapnr;
25879+
25880+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25881+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
25882+ str = "PAX: suspicious stack segment fault";
25883+#endif
25884+
25885 die(str, regs, error_code);
25886 }
25887+
25888+#ifdef CONFIG_PAX_REFCOUNT
25889+ if (trapnr == 4)
25890+ pax_report_refcount_overflow(regs);
25891+#endif
25892+
25893 return 0;
25894 }
25895
25896@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25897 }
25898
25899 static void __kprobes
25900-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25901+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
25902 long error_code, siginfo_t *info)
25903 {
25904 struct task_struct *tsk = current;
25905@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25906 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
25907 printk_ratelimit()) {
25908 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
25909- tsk->comm, tsk->pid, str,
25910+ tsk->comm, task_pid_nr(tsk), str,
25911 regs->ip, regs->sp, error_code);
25912 print_vma_addr(" in ", regs->ip);
25913 pr_cont("\n");
25914@@ -273,7 +279,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
25915 conditional_sti(regs);
25916
25917 #ifdef CONFIG_X86_32
25918- if (regs->flags & X86_VM_MASK) {
25919+ if (v8086_mode(regs)) {
25920 local_irq_enable();
25921 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
25922 goto exit;
25923@@ -281,18 +287,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
25924 #endif
25925
25926 tsk = current;
25927- if (!user_mode(regs)) {
25928+ if (!user_mode_novm(regs)) {
25929 if (fixup_exception(regs))
25930 goto exit;
25931
25932 tsk->thread.error_code = error_code;
25933 tsk->thread.trap_nr = X86_TRAP_GP;
25934 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
25935- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
25936+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
25937+
25938+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25939+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
25940+ die("PAX: suspicious general protection fault", regs, error_code);
25941+ else
25942+#endif
25943+
25944 die("general protection fault", regs, error_code);
25945+ }
25946 goto exit;
25947 }
25948
25949+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25950+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
25951+ struct mm_struct *mm = tsk->mm;
25952+ unsigned long limit;
25953+
25954+ down_write(&mm->mmap_sem);
25955+ limit = mm->context.user_cs_limit;
25956+ if (limit < TASK_SIZE) {
25957+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
25958+ up_write(&mm->mmap_sem);
25959+ return;
25960+ }
25961+ up_write(&mm->mmap_sem);
25962+ }
25963+#endif
25964+
25965 tsk->thread.error_code = error_code;
25966 tsk->thread.trap_nr = X86_TRAP_GP;
25967
25968@@ -450,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25969 /* It's safe to allow irq's after DR6 has been saved */
25970 preempt_conditional_sti(regs);
25971
25972- if (regs->flags & X86_VM_MASK) {
25973+ if (v8086_mode(regs)) {
25974 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
25975 X86_TRAP_DB);
25976 preempt_conditional_cli(regs);
25977@@ -465,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25978 * We already checked v86 mode above, so we can check for kernel mode
25979 * by just checking the CPL of CS.
25980 */
25981- if ((dr6 & DR_STEP) && !user_mode(regs)) {
25982+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
25983 tsk->thread.debugreg6 &= ~DR_STEP;
25984 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
25985 regs->flags &= ~X86_EFLAGS_TF;
25986@@ -497,7 +527,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
25987 return;
25988 conditional_sti(regs);
25989
25990- if (!user_mode_vm(regs))
25991+ if (!user_mode(regs))
25992 {
25993 if (!fixup_exception(regs)) {
25994 task->thread.error_code = error_code;
25995diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
25996index 2ed8459..7cf329f 100644
25997--- a/arch/x86/kernel/uprobes.c
25998+++ b/arch/x86/kernel/uprobes.c
25999@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
26000 int ret = NOTIFY_DONE;
26001
26002 /* We are only interested in userspace traps */
26003- if (regs && !user_mode_vm(regs))
26004+ if (regs && !user_mode(regs))
26005 return NOTIFY_DONE;
26006
26007 switch (val) {
26008@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
26009
26010 if (ncopied != rasize) {
26011 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
26012- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
26013+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
26014
26015 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
26016 }
26017diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
26018index b9242ba..50c5edd 100644
26019--- a/arch/x86/kernel/verify_cpu.S
26020+++ b/arch/x86/kernel/verify_cpu.S
26021@@ -20,6 +20,7 @@
26022 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
26023 * arch/x86/kernel/trampoline_64.S: secondary processor verification
26024 * arch/x86/kernel/head_32.S: processor startup
26025+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
26026 *
26027 * verify_cpu, returns the status of longmode and SSE in register %eax.
26028 * 0: Success 1: Failure
26029diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
26030index e8edcf5..27f9344 100644
26031--- a/arch/x86/kernel/vm86_32.c
26032+++ b/arch/x86/kernel/vm86_32.c
26033@@ -44,6 +44,7 @@
26034 #include <linux/ptrace.h>
26035 #include <linux/audit.h>
26036 #include <linux/stddef.h>
26037+#include <linux/grsecurity.h>
26038
26039 #include <asm/uaccess.h>
26040 #include <asm/io.h>
26041@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
26042 do_exit(SIGSEGV);
26043 }
26044
26045- tss = &per_cpu(init_tss, get_cpu());
26046+ tss = init_tss + get_cpu();
26047 current->thread.sp0 = current->thread.saved_sp0;
26048 current->thread.sysenter_cs = __KERNEL_CS;
26049 load_sp0(tss, &current->thread);
26050@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
26051
26052 if (tsk->thread.saved_sp0)
26053 return -EPERM;
26054+
26055+#ifdef CONFIG_GRKERNSEC_VM86
26056+ if (!capable(CAP_SYS_RAWIO)) {
26057+ gr_handle_vm86();
26058+ return -EPERM;
26059+ }
26060+#endif
26061+
26062 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
26063 offsetof(struct kernel_vm86_struct, vm86plus) -
26064 sizeof(info.regs));
26065@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
26066 int tmp;
26067 struct vm86plus_struct __user *v86;
26068
26069+#ifdef CONFIG_GRKERNSEC_VM86
26070+ if (!capable(CAP_SYS_RAWIO)) {
26071+ gr_handle_vm86();
26072+ return -EPERM;
26073+ }
26074+#endif
26075+
26076 tsk = current;
26077 switch (cmd) {
26078 case VM86_REQUEST_IRQ:
26079@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
26080 tsk->thread.saved_fs = info->regs32->fs;
26081 tsk->thread.saved_gs = get_user_gs(info->regs32);
26082
26083- tss = &per_cpu(init_tss, get_cpu());
26084+ tss = init_tss + get_cpu();
26085 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
26086 if (cpu_has_sep)
26087 tsk->thread.sysenter_cs = 0;
26088@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
26089 goto cannot_handle;
26090 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
26091 goto cannot_handle;
26092- intr_ptr = (unsigned long __user *) (i << 2);
26093+ intr_ptr = (__force unsigned long __user *) (i << 2);
26094 if (get_user(segoffs, intr_ptr))
26095 goto cannot_handle;
26096 if ((segoffs >> 16) == BIOSSEG)
26097diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
26098index 10c4f30..57377c2 100644
26099--- a/arch/x86/kernel/vmlinux.lds.S
26100+++ b/arch/x86/kernel/vmlinux.lds.S
26101@@ -26,6 +26,13 @@
26102 #include <asm/page_types.h>
26103 #include <asm/cache.h>
26104 #include <asm/boot.h>
26105+#include <asm/segment.h>
26106+
26107+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26108+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
26109+#else
26110+#define __KERNEL_TEXT_OFFSET 0
26111+#endif
26112
26113 #undef i386 /* in case the preprocessor is a 32bit one */
26114
26115@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
26116
26117 PHDRS {
26118 text PT_LOAD FLAGS(5); /* R_E */
26119+#ifdef CONFIG_X86_32
26120+ module PT_LOAD FLAGS(5); /* R_E */
26121+#endif
26122+#ifdef CONFIG_XEN
26123+ rodata PT_LOAD FLAGS(5); /* R_E */
26124+#else
26125+ rodata PT_LOAD FLAGS(4); /* R__ */
26126+#endif
26127 data PT_LOAD FLAGS(6); /* RW_ */
26128-#ifdef CONFIG_X86_64
26129+ init.begin PT_LOAD FLAGS(6); /* RW_ */
26130 #ifdef CONFIG_SMP
26131 percpu PT_LOAD FLAGS(6); /* RW_ */
26132 #endif
26133+ text.init PT_LOAD FLAGS(5); /* R_E */
26134+ text.exit PT_LOAD FLAGS(5); /* R_E */
26135 init PT_LOAD FLAGS(7); /* RWE */
26136-#endif
26137 note PT_NOTE FLAGS(0); /* ___ */
26138 }
26139
26140 SECTIONS
26141 {
26142 #ifdef CONFIG_X86_32
26143- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
26144- phys_startup_32 = startup_32 - LOAD_OFFSET;
26145+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
26146 #else
26147- . = __START_KERNEL;
26148- phys_startup_64 = startup_64 - LOAD_OFFSET;
26149+ . = __START_KERNEL;
26150 #endif
26151
26152 /* Text and read-only data */
26153- .text : AT(ADDR(.text) - LOAD_OFFSET) {
26154- _text = .;
26155+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
26156 /* bootstrapping code */
26157+#ifdef CONFIG_X86_32
26158+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26159+#else
26160+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26161+#endif
26162+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26163+ _text = .;
26164 HEAD_TEXT
26165 . = ALIGN(8);
26166 _stext = .;
26167@@ -104,13 +124,48 @@ SECTIONS
26168 IRQENTRY_TEXT
26169 *(.fixup)
26170 *(.gnu.warning)
26171- /* End of text section */
26172- _etext = .;
26173 } :text = 0x9090
26174
26175- NOTES :text :note
26176+ . += __KERNEL_TEXT_OFFSET;
26177
26178- EXCEPTION_TABLE(16) :text = 0x9090
26179+#ifdef CONFIG_X86_32
26180+ . = ALIGN(PAGE_SIZE);
26181+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
26182+
26183+#ifdef CONFIG_PAX_KERNEXEC
26184+ MODULES_EXEC_VADDR = .;
26185+ BYTE(0)
26186+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
26187+ . = ALIGN(HPAGE_SIZE) - 1;
26188+ MODULES_EXEC_END = .;
26189+#endif
26190+
26191+ } :module
26192+#endif
26193+
26194+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
26195+ /* End of text section */
26196+ BYTE(0)
26197+ _etext = . - __KERNEL_TEXT_OFFSET;
26198+ }
26199+
26200+#ifdef CONFIG_X86_32
26201+ . = ALIGN(PAGE_SIZE);
26202+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
26203+ *(.idt)
26204+ . = ALIGN(PAGE_SIZE);
26205+ *(.empty_zero_page)
26206+ *(.initial_pg_fixmap)
26207+ *(.initial_pg_pmd)
26208+ *(.initial_page_table)
26209+ *(.swapper_pg_dir)
26210+ } :rodata
26211+#endif
26212+
26213+ . = ALIGN(PAGE_SIZE);
26214+ NOTES :rodata :note
26215+
26216+ EXCEPTION_TABLE(16) :rodata
26217
26218 #if defined(CONFIG_DEBUG_RODATA)
26219 /* .text should occupy whole number of pages */
26220@@ -122,16 +177,20 @@ SECTIONS
26221
26222 /* Data */
26223 .data : AT(ADDR(.data) - LOAD_OFFSET) {
26224+
26225+#ifdef CONFIG_PAX_KERNEXEC
26226+ . = ALIGN(HPAGE_SIZE);
26227+#else
26228+ . = ALIGN(PAGE_SIZE);
26229+#endif
26230+
26231 /* Start of data section */
26232 _sdata = .;
26233
26234 /* init_task */
26235 INIT_TASK_DATA(THREAD_SIZE)
26236
26237-#ifdef CONFIG_X86_32
26238- /* 32 bit has nosave before _edata */
26239 NOSAVE_DATA
26240-#endif
26241
26242 PAGE_ALIGNED_DATA(PAGE_SIZE)
26243
26244@@ -172,12 +231,19 @@ SECTIONS
26245 #endif /* CONFIG_X86_64 */
26246
26247 /* Init code and data - will be freed after init */
26248- . = ALIGN(PAGE_SIZE);
26249 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
26250+ BYTE(0)
26251+
26252+#ifdef CONFIG_PAX_KERNEXEC
26253+ . = ALIGN(HPAGE_SIZE);
26254+#else
26255+ . = ALIGN(PAGE_SIZE);
26256+#endif
26257+
26258 __init_begin = .; /* paired with __init_end */
26259- }
26260+ } :init.begin
26261
26262-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
26263+#ifdef CONFIG_SMP
26264 /*
26265 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
26266 * output PHDR, so the next output section - .init.text - should
26267@@ -186,12 +252,27 @@ SECTIONS
26268 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
26269 #endif
26270
26271- INIT_TEXT_SECTION(PAGE_SIZE)
26272-#ifdef CONFIG_X86_64
26273- :init
26274-#endif
26275+ . = ALIGN(PAGE_SIZE);
26276+ init_begin = .;
26277+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
26278+ VMLINUX_SYMBOL(_sinittext) = .;
26279+ INIT_TEXT
26280+ VMLINUX_SYMBOL(_einittext) = .;
26281+ . = ALIGN(PAGE_SIZE);
26282+ } :text.init
26283
26284- INIT_DATA_SECTION(16)
26285+ /*
26286+ * .exit.text is discard at runtime, not link time, to deal with
26287+ * references from .altinstructions and .eh_frame
26288+ */
26289+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
26290+ EXIT_TEXT
26291+ . = ALIGN(16);
26292+ } :text.exit
26293+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
26294+
26295+ . = ALIGN(PAGE_SIZE);
26296+ INIT_DATA_SECTION(16) :init
26297
26298 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
26299 __x86_cpu_dev_start = .;
26300@@ -253,19 +334,12 @@ SECTIONS
26301 }
26302
26303 . = ALIGN(8);
26304- /*
26305- * .exit.text is discard at runtime, not link time, to deal with
26306- * references from .altinstructions and .eh_frame
26307- */
26308- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
26309- EXIT_TEXT
26310- }
26311
26312 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
26313 EXIT_DATA
26314 }
26315
26316-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
26317+#ifndef CONFIG_SMP
26318 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
26319 #endif
26320
26321@@ -284,16 +358,10 @@ SECTIONS
26322 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
26323 __smp_locks = .;
26324 *(.smp_locks)
26325- . = ALIGN(PAGE_SIZE);
26326 __smp_locks_end = .;
26327+ . = ALIGN(PAGE_SIZE);
26328 }
26329
26330-#ifdef CONFIG_X86_64
26331- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
26332- NOSAVE_DATA
26333- }
26334-#endif
26335-
26336 /* BSS */
26337 . = ALIGN(PAGE_SIZE);
26338 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
26339@@ -309,6 +377,7 @@ SECTIONS
26340 __brk_base = .;
26341 . += 64 * 1024; /* 64k alignment slop space */
26342 *(.brk_reservation) /* areas brk users have reserved */
26343+ . = ALIGN(HPAGE_SIZE);
26344 __brk_limit = .;
26345 }
26346
26347@@ -335,13 +404,12 @@ SECTIONS
26348 * for the boot processor.
26349 */
26350 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
26351-INIT_PER_CPU(gdt_page);
26352 INIT_PER_CPU(irq_stack_union);
26353
26354 /*
26355 * Build-time check on the image size:
26356 */
26357-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
26358+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
26359 "kernel image bigger than KERNEL_IMAGE_SIZE");
26360
26361 #ifdef CONFIG_SMP
26362diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
26363index 9a907a6..f83f921 100644
26364--- a/arch/x86/kernel/vsyscall_64.c
26365+++ b/arch/x86/kernel/vsyscall_64.c
26366@@ -56,15 +56,13 @@
26367 DEFINE_VVAR(int, vgetcpu_mode);
26368 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
26369
26370-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
26371+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
26372
26373 static int __init vsyscall_setup(char *str)
26374 {
26375 if (str) {
26376 if (!strcmp("emulate", str))
26377 vsyscall_mode = EMULATE;
26378- else if (!strcmp("native", str))
26379- vsyscall_mode = NATIVE;
26380 else if (!strcmp("none", str))
26381 vsyscall_mode = NONE;
26382 else
26383@@ -323,8 +321,7 @@ do_ret:
26384 return true;
26385
26386 sigsegv:
26387- force_sig(SIGSEGV, current);
26388- return true;
26389+ do_group_exit(SIGKILL);
26390 }
26391
26392 /*
26393@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
26394 extern char __vvar_page;
26395 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
26396
26397- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
26398- vsyscall_mode == NATIVE
26399- ? PAGE_KERNEL_VSYSCALL
26400- : PAGE_KERNEL_VVAR);
26401+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
26402 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
26403 (unsigned long)VSYSCALL_START);
26404
26405diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
26406index b014d94..e775258 100644
26407--- a/arch/x86/kernel/x8664_ksyms_64.c
26408+++ b/arch/x86/kernel/x8664_ksyms_64.c
26409@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
26410 EXPORT_SYMBOL(copy_user_generic_unrolled);
26411 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
26412 EXPORT_SYMBOL(__copy_user_nocache);
26413-EXPORT_SYMBOL(_copy_from_user);
26414-EXPORT_SYMBOL(_copy_to_user);
26415
26416 EXPORT_SYMBOL(copy_page);
26417 EXPORT_SYMBOL(clear_page);
26418@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
26419 #ifndef CONFIG_PARAVIRT
26420 EXPORT_SYMBOL(native_load_gs_index);
26421 #endif
26422+
26423+#ifdef CONFIG_PAX_PER_CPU_PGD
26424+EXPORT_SYMBOL(cpu_pgd);
26425+#endif
26426diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
26427index 45a14db..075bb9b 100644
26428--- a/arch/x86/kernel/x86_init.c
26429+++ b/arch/x86/kernel/x86_init.c
26430@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
26431 },
26432 };
26433
26434-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
26435+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
26436 .early_percpu_clock_init = x86_init_noop,
26437 .setup_percpu_clockev = setup_secondary_APIC_clock,
26438 };
26439@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
26440 static void default_nmi_init(void) { };
26441 static int default_i8042_detect(void) { return 1; };
26442
26443-struct x86_platform_ops x86_platform = {
26444+struct x86_platform_ops x86_platform __read_only = {
26445 .calibrate_tsc = native_calibrate_tsc,
26446 .get_wallclock = mach_get_cmos_time,
26447 .set_wallclock = mach_set_rtc_mmss,
26448@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
26449 };
26450
26451 EXPORT_SYMBOL_GPL(x86_platform);
26452-struct x86_msi_ops x86_msi = {
26453+struct x86_msi_ops x86_msi __read_only = {
26454 .setup_msi_irqs = native_setup_msi_irqs,
26455 .compose_msi_msg = native_compose_msi_msg,
26456 .teardown_msi_irq = native_teardown_msi_irq,
26457@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
26458 .setup_hpet_msi = default_setup_hpet_msi,
26459 };
26460
26461-struct x86_io_apic_ops x86_io_apic_ops = {
26462+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
26463 .init = native_io_apic_init_mappings,
26464 .read = native_io_apic_read,
26465 .write = native_io_apic_write,
26466diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
26467index ada87a3..afea76d 100644
26468--- a/arch/x86/kernel/xsave.c
26469+++ b/arch/x86/kernel/xsave.c
26470@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
26471 {
26472 int err;
26473
26474+ buf = (struct xsave_struct __user *)____m(buf);
26475 if (use_xsave())
26476 err = xsave_user(buf);
26477 else if (use_fxsr())
26478@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
26479 */
26480 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
26481 {
26482+ buf = (void __user *)____m(buf);
26483 if (use_xsave()) {
26484 if ((unsigned long)buf % 64 || fx_only) {
26485 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
26486diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
26487index a20ecb5..d0e2194 100644
26488--- a/arch/x86/kvm/cpuid.c
26489+++ b/arch/x86/kvm/cpuid.c
26490@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
26491 struct kvm_cpuid2 *cpuid,
26492 struct kvm_cpuid_entry2 __user *entries)
26493 {
26494- int r;
26495+ int r, i;
26496
26497 r = -E2BIG;
26498 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
26499 goto out;
26500 r = -EFAULT;
26501- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
26502- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
26503+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
26504 goto out;
26505+ for (i = 0; i < cpuid->nent; ++i) {
26506+ struct kvm_cpuid_entry2 cpuid_entry;
26507+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
26508+ goto out;
26509+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
26510+ }
26511 vcpu->arch.cpuid_nent = cpuid->nent;
26512 kvm_apic_set_version(vcpu);
26513 kvm_x86_ops->cpuid_update(vcpu);
26514@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
26515 struct kvm_cpuid2 *cpuid,
26516 struct kvm_cpuid_entry2 __user *entries)
26517 {
26518- int r;
26519+ int r, i;
26520
26521 r = -E2BIG;
26522 if (cpuid->nent < vcpu->arch.cpuid_nent)
26523 goto out;
26524 r = -EFAULT;
26525- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
26526- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
26527+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
26528 goto out;
26529+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
26530+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
26531+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
26532+ goto out;
26533+ }
26534 return 0;
26535
26536 out:
26537diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
26538index 5953dce..f11a7d2 100644
26539--- a/arch/x86/kvm/emulate.c
26540+++ b/arch/x86/kvm/emulate.c
26541@@ -329,6 +329,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26542
26543 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
26544 do { \
26545+ unsigned long _tmp; \
26546 __asm__ __volatile__ ( \
26547 _PRE_EFLAGS("0", "4", "2") \
26548 _op _suffix " %"_x"3,%1; " \
26549@@ -343,8 +344,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26550 /* Raw emulation: instruction has two explicit operands. */
26551 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
26552 do { \
26553- unsigned long _tmp; \
26554- \
26555 switch ((ctxt)->dst.bytes) { \
26556 case 2: \
26557 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
26558@@ -360,7 +359,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26559
26560 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
26561 do { \
26562- unsigned long _tmp; \
26563 switch ((ctxt)->dst.bytes) { \
26564 case 1: \
26565 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
26566diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
26567index 0eee2c8..94a32c3 100644
26568--- a/arch/x86/kvm/lapic.c
26569+++ b/arch/x86/kvm/lapic.c
26570@@ -55,7 +55,7 @@
26571 #define APIC_BUS_CYCLE_NS 1
26572
26573 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
26574-#define apic_debug(fmt, arg...)
26575+#define apic_debug(fmt, arg...) do {} while (0)
26576
26577 #define APIC_LVT_NUM 6
26578 /* 14 is the version for Xeon and Pentium 8.4.8*/
26579diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
26580index da20860..d19fdf5 100644
26581--- a/arch/x86/kvm/paging_tmpl.h
26582+++ b/arch/x86/kvm/paging_tmpl.h
26583@@ -208,7 +208,7 @@ retry_walk:
26584 if (unlikely(kvm_is_error_hva(host_addr)))
26585 goto error;
26586
26587- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
26588+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
26589 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
26590 goto error;
26591 walker->ptep_user[walker->level - 1] = ptep_user;
26592diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
26593index a14a6ea..dc86cf0 100644
26594--- a/arch/x86/kvm/svm.c
26595+++ b/arch/x86/kvm/svm.c
26596@@ -3493,7 +3493,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
26597 int cpu = raw_smp_processor_id();
26598
26599 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
26600+
26601+ pax_open_kernel();
26602 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
26603+ pax_close_kernel();
26604+
26605 load_TR_desc();
26606 }
26607
26608@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
26609 #endif
26610 #endif
26611
26612+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26613+ __set_fs(current_thread_info()->addr_limit);
26614+#endif
26615+
26616 reload_tss(vcpu);
26617
26618 local_irq_disable();
26619diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
26620index 5402c94..c3bdeee 100644
26621--- a/arch/x86/kvm/vmx.c
26622+++ b/arch/x86/kvm/vmx.c
26623@@ -1311,12 +1311,12 @@ static void vmcs_write64(unsigned long field, u64 value)
26624 #endif
26625 }
26626
26627-static void vmcs_clear_bits(unsigned long field, u32 mask)
26628+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
26629 {
26630 vmcs_writel(field, vmcs_readl(field) & ~mask);
26631 }
26632
26633-static void vmcs_set_bits(unsigned long field, u32 mask)
26634+static void vmcs_set_bits(unsigned long field, unsigned long mask)
26635 {
26636 vmcs_writel(field, vmcs_readl(field) | mask);
26637 }
26638@@ -1517,7 +1517,11 @@ static void reload_tss(void)
26639 struct desc_struct *descs;
26640
26641 descs = (void *)gdt->address;
26642+
26643+ pax_open_kernel();
26644 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
26645+ pax_close_kernel();
26646+
26647 load_TR_desc();
26648 }
26649
26650@@ -1741,6 +1745,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
26651 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
26652 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
26653
26654+#ifdef CONFIG_PAX_PER_CPU_PGD
26655+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
26656+#endif
26657+
26658 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
26659 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
26660 vmx->loaded_vmcs->cpu = cpu;
26661@@ -2935,8 +2943,11 @@ static __init int hardware_setup(void)
26662 if (!cpu_has_vmx_flexpriority())
26663 flexpriority_enabled = 0;
26664
26665- if (!cpu_has_vmx_tpr_shadow())
26666- kvm_x86_ops->update_cr8_intercept = NULL;
26667+ if (!cpu_has_vmx_tpr_shadow()) {
26668+ pax_open_kernel();
26669+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
26670+ pax_close_kernel();
26671+ }
26672
26673 if (enable_ept && !cpu_has_vmx_ept_2m_page())
26674 kvm_disable_largepages();
26675@@ -2947,13 +2958,15 @@ static __init int hardware_setup(void)
26676 if (!cpu_has_vmx_apicv())
26677 enable_apicv = 0;
26678
26679+ pax_open_kernel();
26680 if (enable_apicv)
26681- kvm_x86_ops->update_cr8_intercept = NULL;
26682+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
26683 else {
26684- kvm_x86_ops->hwapic_irr_update = NULL;
26685- kvm_x86_ops->deliver_posted_interrupt = NULL;
26686- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
26687+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
26688+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
26689+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
26690 }
26691+ pax_close_kernel();
26692
26693 if (nested)
26694 nested_vmx_setup_ctls_msrs();
26695@@ -4076,7 +4089,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
26696
26697 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
26698 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
26699+
26700+#ifndef CONFIG_PAX_PER_CPU_PGD
26701 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
26702+#endif
26703
26704 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
26705 #ifdef CONFIG_X86_64
26706@@ -4098,7 +4114,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
26707 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
26708 vmx->host_idt_base = dt.address;
26709
26710- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
26711+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
26712
26713 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
26714 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
26715@@ -7030,6 +7046,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26716 "jmp 2f \n\t"
26717 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
26718 "2: "
26719+
26720+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26721+ "ljmp %[cs],$3f\n\t"
26722+ "3: "
26723+#endif
26724+
26725 /* Save guest registers, load host registers, keep flags */
26726 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
26727 "pop %0 \n\t"
26728@@ -7082,6 +7104,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26729 #endif
26730 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
26731 [wordsize]"i"(sizeof(ulong))
26732+
26733+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26734+ ,[cs]"i"(__KERNEL_CS)
26735+#endif
26736+
26737 : "cc", "memory"
26738 #ifdef CONFIG_X86_64
26739 , "rax", "rbx", "rdi", "rsi"
26740@@ -7095,7 +7122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26741 if (debugctlmsr)
26742 update_debugctlmsr(debugctlmsr);
26743
26744-#ifndef CONFIG_X86_64
26745+#ifdef CONFIG_X86_32
26746 /*
26747 * The sysexit path does not restore ds/es, so we must set them to
26748 * a reasonable value ourselves.
26749@@ -7104,8 +7131,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26750 * may be executed in interrupt context, which saves and restore segments
26751 * around it, nullifying its effect.
26752 */
26753- loadsegment(ds, __USER_DS);
26754- loadsegment(es, __USER_DS);
26755+ loadsegment(ds, __KERNEL_DS);
26756+ loadsegment(es, __KERNEL_DS);
26757+ loadsegment(ss, __KERNEL_DS);
26758+
26759+#ifdef CONFIG_PAX_KERNEXEC
26760+ loadsegment(fs, __KERNEL_PERCPU);
26761+#endif
26762+
26763+#ifdef CONFIG_PAX_MEMORY_UDEREF
26764+ __set_fs(current_thread_info()->addr_limit);
26765+#endif
26766+
26767 #endif
26768
26769 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
26770diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
26771index e8ba99c..ee9d7d9 100644
26772--- a/arch/x86/kvm/x86.c
26773+++ b/arch/x86/kvm/x86.c
26774@@ -1725,8 +1725,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
26775 {
26776 struct kvm *kvm = vcpu->kvm;
26777 int lm = is_long_mode(vcpu);
26778- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
26779- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
26780+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
26781+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
26782 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
26783 : kvm->arch.xen_hvm_config.blob_size_32;
26784 u32 page_num = data & ~PAGE_MASK;
26785@@ -2609,6 +2609,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
26786 if (n < msr_list.nmsrs)
26787 goto out;
26788 r = -EFAULT;
26789+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
26790+ goto out;
26791 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
26792 num_msrs_to_save * sizeof(u32)))
26793 goto out;
26794@@ -5297,7 +5299,7 @@ static struct notifier_block pvclock_gtod_notifier = {
26795 };
26796 #endif
26797
26798-int kvm_arch_init(void *opaque)
26799+int kvm_arch_init(const void *opaque)
26800 {
26801 int r;
26802 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
26803diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
26804index 7114c63..a1018fc 100644
26805--- a/arch/x86/lguest/boot.c
26806+++ b/arch/x86/lguest/boot.c
26807@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
26808 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
26809 * Launcher to reboot us.
26810 */
26811-static void lguest_restart(char *reason)
26812+static __noreturn void lguest_restart(char *reason)
26813 {
26814 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
26815+ BUG();
26816 }
26817
26818 /*G:050
26819diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
26820index 00933d5..3a64af9 100644
26821--- a/arch/x86/lib/atomic64_386_32.S
26822+++ b/arch/x86/lib/atomic64_386_32.S
26823@@ -48,6 +48,10 @@ BEGIN(read)
26824 movl (v), %eax
26825 movl 4(v), %edx
26826 RET_ENDP
26827+BEGIN(read_unchecked)
26828+ movl (v), %eax
26829+ movl 4(v), %edx
26830+RET_ENDP
26831 #undef v
26832
26833 #define v %esi
26834@@ -55,6 +59,10 @@ BEGIN(set)
26835 movl %ebx, (v)
26836 movl %ecx, 4(v)
26837 RET_ENDP
26838+BEGIN(set_unchecked)
26839+ movl %ebx, (v)
26840+ movl %ecx, 4(v)
26841+RET_ENDP
26842 #undef v
26843
26844 #define v %esi
26845@@ -70,6 +78,20 @@ RET_ENDP
26846 BEGIN(add)
26847 addl %eax, (v)
26848 adcl %edx, 4(v)
26849+
26850+#ifdef CONFIG_PAX_REFCOUNT
26851+ jno 0f
26852+ subl %eax, (v)
26853+ sbbl %edx, 4(v)
26854+ int $4
26855+0:
26856+ _ASM_EXTABLE(0b, 0b)
26857+#endif
26858+
26859+RET_ENDP
26860+BEGIN(add_unchecked)
26861+ addl %eax, (v)
26862+ adcl %edx, 4(v)
26863 RET_ENDP
26864 #undef v
26865
26866@@ -77,6 +99,24 @@ RET_ENDP
26867 BEGIN(add_return)
26868 addl (v), %eax
26869 adcl 4(v), %edx
26870+
26871+#ifdef CONFIG_PAX_REFCOUNT
26872+ into
26873+1234:
26874+ _ASM_EXTABLE(1234b, 2f)
26875+#endif
26876+
26877+ movl %eax, (v)
26878+ movl %edx, 4(v)
26879+
26880+#ifdef CONFIG_PAX_REFCOUNT
26881+2:
26882+#endif
26883+
26884+RET_ENDP
26885+BEGIN(add_return_unchecked)
26886+ addl (v), %eax
26887+ adcl 4(v), %edx
26888 movl %eax, (v)
26889 movl %edx, 4(v)
26890 RET_ENDP
26891@@ -86,6 +126,20 @@ RET_ENDP
26892 BEGIN(sub)
26893 subl %eax, (v)
26894 sbbl %edx, 4(v)
26895+
26896+#ifdef CONFIG_PAX_REFCOUNT
26897+ jno 0f
26898+ addl %eax, (v)
26899+ adcl %edx, 4(v)
26900+ int $4
26901+0:
26902+ _ASM_EXTABLE(0b, 0b)
26903+#endif
26904+
26905+RET_ENDP
26906+BEGIN(sub_unchecked)
26907+ subl %eax, (v)
26908+ sbbl %edx, 4(v)
26909 RET_ENDP
26910 #undef v
26911
26912@@ -96,6 +150,27 @@ BEGIN(sub_return)
26913 sbbl $0, %edx
26914 addl (v), %eax
26915 adcl 4(v), %edx
26916+
26917+#ifdef CONFIG_PAX_REFCOUNT
26918+ into
26919+1234:
26920+ _ASM_EXTABLE(1234b, 2f)
26921+#endif
26922+
26923+ movl %eax, (v)
26924+ movl %edx, 4(v)
26925+
26926+#ifdef CONFIG_PAX_REFCOUNT
26927+2:
26928+#endif
26929+
26930+RET_ENDP
26931+BEGIN(sub_return_unchecked)
26932+ negl %edx
26933+ negl %eax
26934+ sbbl $0, %edx
26935+ addl (v), %eax
26936+ adcl 4(v), %edx
26937 movl %eax, (v)
26938 movl %edx, 4(v)
26939 RET_ENDP
26940@@ -105,6 +180,20 @@ RET_ENDP
26941 BEGIN(inc)
26942 addl $1, (v)
26943 adcl $0, 4(v)
26944+
26945+#ifdef CONFIG_PAX_REFCOUNT
26946+ jno 0f
26947+ subl $1, (v)
26948+ sbbl $0, 4(v)
26949+ int $4
26950+0:
26951+ _ASM_EXTABLE(0b, 0b)
26952+#endif
26953+
26954+RET_ENDP
26955+BEGIN(inc_unchecked)
26956+ addl $1, (v)
26957+ adcl $0, 4(v)
26958 RET_ENDP
26959 #undef v
26960
26961@@ -114,6 +203,26 @@ BEGIN(inc_return)
26962 movl 4(v), %edx
26963 addl $1, %eax
26964 adcl $0, %edx
26965+
26966+#ifdef CONFIG_PAX_REFCOUNT
26967+ into
26968+1234:
26969+ _ASM_EXTABLE(1234b, 2f)
26970+#endif
26971+
26972+ movl %eax, (v)
26973+ movl %edx, 4(v)
26974+
26975+#ifdef CONFIG_PAX_REFCOUNT
26976+2:
26977+#endif
26978+
26979+RET_ENDP
26980+BEGIN(inc_return_unchecked)
26981+ movl (v), %eax
26982+ movl 4(v), %edx
26983+ addl $1, %eax
26984+ adcl $0, %edx
26985 movl %eax, (v)
26986 movl %edx, 4(v)
26987 RET_ENDP
26988@@ -123,6 +232,20 @@ RET_ENDP
26989 BEGIN(dec)
26990 subl $1, (v)
26991 sbbl $0, 4(v)
26992+
26993+#ifdef CONFIG_PAX_REFCOUNT
26994+ jno 0f
26995+ addl $1, (v)
26996+ adcl $0, 4(v)
26997+ int $4
26998+0:
26999+ _ASM_EXTABLE(0b, 0b)
27000+#endif
27001+
27002+RET_ENDP
27003+BEGIN(dec_unchecked)
27004+ subl $1, (v)
27005+ sbbl $0, 4(v)
27006 RET_ENDP
27007 #undef v
27008
27009@@ -132,6 +255,26 @@ BEGIN(dec_return)
27010 movl 4(v), %edx
27011 subl $1, %eax
27012 sbbl $0, %edx
27013+
27014+#ifdef CONFIG_PAX_REFCOUNT
27015+ into
27016+1234:
27017+ _ASM_EXTABLE(1234b, 2f)
27018+#endif
27019+
27020+ movl %eax, (v)
27021+ movl %edx, 4(v)
27022+
27023+#ifdef CONFIG_PAX_REFCOUNT
27024+2:
27025+#endif
27026+
27027+RET_ENDP
27028+BEGIN(dec_return_unchecked)
27029+ movl (v), %eax
27030+ movl 4(v), %edx
27031+ subl $1, %eax
27032+ sbbl $0, %edx
27033 movl %eax, (v)
27034 movl %edx, 4(v)
27035 RET_ENDP
27036@@ -143,6 +286,13 @@ BEGIN(add_unless)
27037 adcl %edx, %edi
27038 addl (v), %eax
27039 adcl 4(v), %edx
27040+
27041+#ifdef CONFIG_PAX_REFCOUNT
27042+ into
27043+1234:
27044+ _ASM_EXTABLE(1234b, 2f)
27045+#endif
27046+
27047 cmpl %eax, %ecx
27048 je 3f
27049 1:
27050@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
27051 1:
27052 addl $1, %eax
27053 adcl $0, %edx
27054+
27055+#ifdef CONFIG_PAX_REFCOUNT
27056+ into
27057+1234:
27058+ _ASM_EXTABLE(1234b, 2f)
27059+#endif
27060+
27061 movl %eax, (v)
27062 movl %edx, 4(v)
27063 movl $1, %eax
27064@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
27065 movl 4(v), %edx
27066 subl $1, %eax
27067 sbbl $0, %edx
27068+
27069+#ifdef CONFIG_PAX_REFCOUNT
27070+ into
27071+1234:
27072+ _ASM_EXTABLE(1234b, 1f)
27073+#endif
27074+
27075 js 1f
27076 movl %eax, (v)
27077 movl %edx, 4(v)
27078diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
27079index f5cc9eb..51fa319 100644
27080--- a/arch/x86/lib/atomic64_cx8_32.S
27081+++ b/arch/x86/lib/atomic64_cx8_32.S
27082@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
27083 CFI_STARTPROC
27084
27085 read64 %ecx
27086+ pax_force_retaddr
27087 ret
27088 CFI_ENDPROC
27089 ENDPROC(atomic64_read_cx8)
27090
27091+ENTRY(atomic64_read_unchecked_cx8)
27092+ CFI_STARTPROC
27093+
27094+ read64 %ecx
27095+ pax_force_retaddr
27096+ ret
27097+ CFI_ENDPROC
27098+ENDPROC(atomic64_read_unchecked_cx8)
27099+
27100 ENTRY(atomic64_set_cx8)
27101 CFI_STARTPROC
27102
27103@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
27104 cmpxchg8b (%esi)
27105 jne 1b
27106
27107+ pax_force_retaddr
27108 ret
27109 CFI_ENDPROC
27110 ENDPROC(atomic64_set_cx8)
27111
27112+ENTRY(atomic64_set_unchecked_cx8)
27113+ CFI_STARTPROC
27114+
27115+1:
27116+/* we don't need LOCK_PREFIX since aligned 64-bit writes
27117+ * are atomic on 586 and newer */
27118+ cmpxchg8b (%esi)
27119+ jne 1b
27120+
27121+ pax_force_retaddr
27122+ ret
27123+ CFI_ENDPROC
27124+ENDPROC(atomic64_set_unchecked_cx8)
27125+
27126 ENTRY(atomic64_xchg_cx8)
27127 CFI_STARTPROC
27128
27129@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
27130 cmpxchg8b (%esi)
27131 jne 1b
27132
27133+ pax_force_retaddr
27134 ret
27135 CFI_ENDPROC
27136 ENDPROC(atomic64_xchg_cx8)
27137
27138-.macro addsub_return func ins insc
27139-ENTRY(atomic64_\func\()_return_cx8)
27140+.macro addsub_return func ins insc unchecked=""
27141+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27142 CFI_STARTPROC
27143 SAVE ebp
27144 SAVE ebx
27145@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
27146 movl %edx, %ecx
27147 \ins\()l %esi, %ebx
27148 \insc\()l %edi, %ecx
27149+
27150+.ifb \unchecked
27151+#ifdef CONFIG_PAX_REFCOUNT
27152+ into
27153+2:
27154+ _ASM_EXTABLE(2b, 3f)
27155+#endif
27156+.endif
27157+
27158 LOCK_PREFIX
27159 cmpxchg8b (%ebp)
27160 jne 1b
27161-
27162-10:
27163 movl %ebx, %eax
27164 movl %ecx, %edx
27165+
27166+.ifb \unchecked
27167+#ifdef CONFIG_PAX_REFCOUNT
27168+3:
27169+#endif
27170+.endif
27171+
27172 RESTORE edi
27173 RESTORE esi
27174 RESTORE ebx
27175 RESTORE ebp
27176+ pax_force_retaddr
27177 ret
27178 CFI_ENDPROC
27179-ENDPROC(atomic64_\func\()_return_cx8)
27180+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
27181 .endm
27182
27183 addsub_return add add adc
27184 addsub_return sub sub sbb
27185+addsub_return add add adc _unchecked
27186+addsub_return sub sub sbb _unchecked
27187
27188-.macro incdec_return func ins insc
27189-ENTRY(atomic64_\func\()_return_cx8)
27190+.macro incdec_return func ins insc unchecked=""
27191+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27192 CFI_STARTPROC
27193 SAVE ebx
27194
27195@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
27196 movl %edx, %ecx
27197 \ins\()l $1, %ebx
27198 \insc\()l $0, %ecx
27199+
27200+.ifb \unchecked
27201+#ifdef CONFIG_PAX_REFCOUNT
27202+ into
27203+2:
27204+ _ASM_EXTABLE(2b, 3f)
27205+#endif
27206+.endif
27207+
27208 LOCK_PREFIX
27209 cmpxchg8b (%esi)
27210 jne 1b
27211
27212-10:
27213 movl %ebx, %eax
27214 movl %ecx, %edx
27215+
27216+.ifb \unchecked
27217+#ifdef CONFIG_PAX_REFCOUNT
27218+3:
27219+#endif
27220+.endif
27221+
27222 RESTORE ebx
27223+ pax_force_retaddr
27224 ret
27225 CFI_ENDPROC
27226-ENDPROC(atomic64_\func\()_return_cx8)
27227+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
27228 .endm
27229
27230 incdec_return inc add adc
27231 incdec_return dec sub sbb
27232+incdec_return inc add adc _unchecked
27233+incdec_return dec sub sbb _unchecked
27234
27235 ENTRY(atomic64_dec_if_positive_cx8)
27236 CFI_STARTPROC
27237@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
27238 movl %edx, %ecx
27239 subl $1, %ebx
27240 sbb $0, %ecx
27241+
27242+#ifdef CONFIG_PAX_REFCOUNT
27243+ into
27244+1234:
27245+ _ASM_EXTABLE(1234b, 2f)
27246+#endif
27247+
27248 js 2f
27249 LOCK_PREFIX
27250 cmpxchg8b (%esi)
27251@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
27252 movl %ebx, %eax
27253 movl %ecx, %edx
27254 RESTORE ebx
27255+ pax_force_retaddr
27256 ret
27257 CFI_ENDPROC
27258 ENDPROC(atomic64_dec_if_positive_cx8)
27259@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
27260 movl %edx, %ecx
27261 addl %ebp, %ebx
27262 adcl %edi, %ecx
27263+
27264+#ifdef CONFIG_PAX_REFCOUNT
27265+ into
27266+1234:
27267+ _ASM_EXTABLE(1234b, 3f)
27268+#endif
27269+
27270 LOCK_PREFIX
27271 cmpxchg8b (%esi)
27272 jne 1b
27273@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
27274 CFI_ADJUST_CFA_OFFSET -8
27275 RESTORE ebx
27276 RESTORE ebp
27277+ pax_force_retaddr
27278 ret
27279 4:
27280 cmpl %edx, 4(%esp)
27281@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
27282 xorl %ecx, %ecx
27283 addl $1, %ebx
27284 adcl %edx, %ecx
27285+
27286+#ifdef CONFIG_PAX_REFCOUNT
27287+ into
27288+1234:
27289+ _ASM_EXTABLE(1234b, 3f)
27290+#endif
27291+
27292 LOCK_PREFIX
27293 cmpxchg8b (%esi)
27294 jne 1b
27295@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
27296 movl $1, %eax
27297 3:
27298 RESTORE ebx
27299+ pax_force_retaddr
27300 ret
27301 CFI_ENDPROC
27302 ENDPROC(atomic64_inc_not_zero_cx8)
27303diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
27304index e78b8ee..7e173a8 100644
27305--- a/arch/x86/lib/checksum_32.S
27306+++ b/arch/x86/lib/checksum_32.S
27307@@ -29,7 +29,8 @@
27308 #include <asm/dwarf2.h>
27309 #include <asm/errno.h>
27310 #include <asm/asm.h>
27311-
27312+#include <asm/segment.h>
27313+
27314 /*
27315 * computes a partial checksum, e.g. for TCP/UDP fragments
27316 */
27317@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
27318
27319 #define ARGBASE 16
27320 #define FP 12
27321-
27322-ENTRY(csum_partial_copy_generic)
27323+
27324+ENTRY(csum_partial_copy_generic_to_user)
27325 CFI_STARTPROC
27326+
27327+#ifdef CONFIG_PAX_MEMORY_UDEREF
27328+ pushl_cfi %gs
27329+ popl_cfi %es
27330+ jmp csum_partial_copy_generic
27331+#endif
27332+
27333+ENTRY(csum_partial_copy_generic_from_user)
27334+
27335+#ifdef CONFIG_PAX_MEMORY_UDEREF
27336+ pushl_cfi %gs
27337+ popl_cfi %ds
27338+#endif
27339+
27340+ENTRY(csum_partial_copy_generic)
27341 subl $4,%esp
27342 CFI_ADJUST_CFA_OFFSET 4
27343 pushl_cfi %edi
27344@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
27345 jmp 4f
27346 SRC(1: movw (%esi), %bx )
27347 addl $2, %esi
27348-DST( movw %bx, (%edi) )
27349+DST( movw %bx, %es:(%edi) )
27350 addl $2, %edi
27351 addw %bx, %ax
27352 adcl $0, %eax
27353@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
27354 SRC(1: movl (%esi), %ebx )
27355 SRC( movl 4(%esi), %edx )
27356 adcl %ebx, %eax
27357-DST( movl %ebx, (%edi) )
27358+DST( movl %ebx, %es:(%edi) )
27359 adcl %edx, %eax
27360-DST( movl %edx, 4(%edi) )
27361+DST( movl %edx, %es:4(%edi) )
27362
27363 SRC( movl 8(%esi), %ebx )
27364 SRC( movl 12(%esi), %edx )
27365 adcl %ebx, %eax
27366-DST( movl %ebx, 8(%edi) )
27367+DST( movl %ebx, %es:8(%edi) )
27368 adcl %edx, %eax
27369-DST( movl %edx, 12(%edi) )
27370+DST( movl %edx, %es:12(%edi) )
27371
27372 SRC( movl 16(%esi), %ebx )
27373 SRC( movl 20(%esi), %edx )
27374 adcl %ebx, %eax
27375-DST( movl %ebx, 16(%edi) )
27376+DST( movl %ebx, %es:16(%edi) )
27377 adcl %edx, %eax
27378-DST( movl %edx, 20(%edi) )
27379+DST( movl %edx, %es:20(%edi) )
27380
27381 SRC( movl 24(%esi), %ebx )
27382 SRC( movl 28(%esi), %edx )
27383 adcl %ebx, %eax
27384-DST( movl %ebx, 24(%edi) )
27385+DST( movl %ebx, %es:24(%edi) )
27386 adcl %edx, %eax
27387-DST( movl %edx, 28(%edi) )
27388+DST( movl %edx, %es:28(%edi) )
27389
27390 lea 32(%esi), %esi
27391 lea 32(%edi), %edi
27392@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
27393 shrl $2, %edx # This clears CF
27394 SRC(3: movl (%esi), %ebx )
27395 adcl %ebx, %eax
27396-DST( movl %ebx, (%edi) )
27397+DST( movl %ebx, %es:(%edi) )
27398 lea 4(%esi), %esi
27399 lea 4(%edi), %edi
27400 dec %edx
27401@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
27402 jb 5f
27403 SRC( movw (%esi), %cx )
27404 leal 2(%esi), %esi
27405-DST( movw %cx, (%edi) )
27406+DST( movw %cx, %es:(%edi) )
27407 leal 2(%edi), %edi
27408 je 6f
27409 shll $16,%ecx
27410 SRC(5: movb (%esi), %cl )
27411-DST( movb %cl, (%edi) )
27412+DST( movb %cl, %es:(%edi) )
27413 6: addl %ecx, %eax
27414 adcl $0, %eax
27415 7:
27416@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
27417
27418 6001:
27419 movl ARGBASE+20(%esp), %ebx # src_err_ptr
27420- movl $-EFAULT, (%ebx)
27421+ movl $-EFAULT, %ss:(%ebx)
27422
27423 # zero the complete destination - computing the rest
27424 # is too much work
27425@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
27426
27427 6002:
27428 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
27429- movl $-EFAULT,(%ebx)
27430+ movl $-EFAULT,%ss:(%ebx)
27431 jmp 5000b
27432
27433 .previous
27434
27435+ pushl_cfi %ss
27436+ popl_cfi %ds
27437+ pushl_cfi %ss
27438+ popl_cfi %es
27439 popl_cfi %ebx
27440 CFI_RESTORE ebx
27441 popl_cfi %esi
27442@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
27443 popl_cfi %ecx # equivalent to addl $4,%esp
27444 ret
27445 CFI_ENDPROC
27446-ENDPROC(csum_partial_copy_generic)
27447+ENDPROC(csum_partial_copy_generic_to_user)
27448
27449 #else
27450
27451 /* Version for PentiumII/PPro */
27452
27453 #define ROUND1(x) \
27454+ nop; nop; nop; \
27455 SRC(movl x(%esi), %ebx ) ; \
27456 addl %ebx, %eax ; \
27457- DST(movl %ebx, x(%edi) ) ;
27458+ DST(movl %ebx, %es:x(%edi)) ;
27459
27460 #define ROUND(x) \
27461+ nop; nop; nop; \
27462 SRC(movl x(%esi), %ebx ) ; \
27463 adcl %ebx, %eax ; \
27464- DST(movl %ebx, x(%edi) ) ;
27465+ DST(movl %ebx, %es:x(%edi)) ;
27466
27467 #define ARGBASE 12
27468-
27469-ENTRY(csum_partial_copy_generic)
27470+
27471+ENTRY(csum_partial_copy_generic_to_user)
27472 CFI_STARTPROC
27473+
27474+#ifdef CONFIG_PAX_MEMORY_UDEREF
27475+ pushl_cfi %gs
27476+ popl_cfi %es
27477+ jmp csum_partial_copy_generic
27478+#endif
27479+
27480+ENTRY(csum_partial_copy_generic_from_user)
27481+
27482+#ifdef CONFIG_PAX_MEMORY_UDEREF
27483+ pushl_cfi %gs
27484+ popl_cfi %ds
27485+#endif
27486+
27487+ENTRY(csum_partial_copy_generic)
27488 pushl_cfi %ebx
27489 CFI_REL_OFFSET ebx, 0
27490 pushl_cfi %edi
27491@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
27492 subl %ebx, %edi
27493 lea -1(%esi),%edx
27494 andl $-32,%edx
27495- lea 3f(%ebx,%ebx), %ebx
27496+ lea 3f(%ebx,%ebx,2), %ebx
27497 testl %esi, %esi
27498 jmp *%ebx
27499 1: addl $64,%esi
27500@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
27501 jb 5f
27502 SRC( movw (%esi), %dx )
27503 leal 2(%esi), %esi
27504-DST( movw %dx, (%edi) )
27505+DST( movw %dx, %es:(%edi) )
27506 leal 2(%edi), %edi
27507 je 6f
27508 shll $16,%edx
27509 5:
27510 SRC( movb (%esi), %dl )
27511-DST( movb %dl, (%edi) )
27512+DST( movb %dl, %es:(%edi) )
27513 6: addl %edx, %eax
27514 adcl $0, %eax
27515 7:
27516 .section .fixup, "ax"
27517 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
27518- movl $-EFAULT, (%ebx)
27519+ movl $-EFAULT, %ss:(%ebx)
27520 # zero the complete destination (computing the rest is too much work)
27521 movl ARGBASE+8(%esp),%edi # dst
27522 movl ARGBASE+12(%esp),%ecx # len
27523@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
27524 rep; stosb
27525 jmp 7b
27526 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
27527- movl $-EFAULT, (%ebx)
27528+ movl $-EFAULT, %ss:(%ebx)
27529 jmp 7b
27530 .previous
27531
27532+#ifdef CONFIG_PAX_MEMORY_UDEREF
27533+ pushl_cfi %ss
27534+ popl_cfi %ds
27535+ pushl_cfi %ss
27536+ popl_cfi %es
27537+#endif
27538+
27539 popl_cfi %esi
27540 CFI_RESTORE esi
27541 popl_cfi %edi
27542@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
27543 CFI_RESTORE ebx
27544 ret
27545 CFI_ENDPROC
27546-ENDPROC(csum_partial_copy_generic)
27547+ENDPROC(csum_partial_copy_generic_to_user)
27548
27549 #undef ROUND
27550 #undef ROUND1
27551diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
27552index f2145cf..cea889d 100644
27553--- a/arch/x86/lib/clear_page_64.S
27554+++ b/arch/x86/lib/clear_page_64.S
27555@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
27556 movl $4096/8,%ecx
27557 xorl %eax,%eax
27558 rep stosq
27559+ pax_force_retaddr
27560 ret
27561 CFI_ENDPROC
27562 ENDPROC(clear_page_c)
27563@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
27564 movl $4096,%ecx
27565 xorl %eax,%eax
27566 rep stosb
27567+ pax_force_retaddr
27568 ret
27569 CFI_ENDPROC
27570 ENDPROC(clear_page_c_e)
27571@@ -43,6 +45,7 @@ ENTRY(clear_page)
27572 leaq 64(%rdi),%rdi
27573 jnz .Lloop
27574 nop
27575+ pax_force_retaddr
27576 ret
27577 CFI_ENDPROC
27578 .Lclear_page_end:
27579@@ -58,7 +61,7 @@ ENDPROC(clear_page)
27580
27581 #include <asm/cpufeature.h>
27582
27583- .section .altinstr_replacement,"ax"
27584+ .section .altinstr_replacement,"a"
27585 1: .byte 0xeb /* jmp <disp8> */
27586 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
27587 2: .byte 0xeb /* jmp <disp8> */
27588diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
27589index 1e572c5..2a162cd 100644
27590--- a/arch/x86/lib/cmpxchg16b_emu.S
27591+++ b/arch/x86/lib/cmpxchg16b_emu.S
27592@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
27593
27594 popf
27595 mov $1, %al
27596+ pax_force_retaddr
27597 ret
27598
27599 not_same:
27600 popf
27601 xor %al,%al
27602+ pax_force_retaddr
27603 ret
27604
27605 CFI_ENDPROC
27606diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
27607index 176cca6..1166c50 100644
27608--- a/arch/x86/lib/copy_page_64.S
27609+++ b/arch/x86/lib/copy_page_64.S
27610@@ -9,6 +9,7 @@ copy_page_rep:
27611 CFI_STARTPROC
27612 movl $4096/8, %ecx
27613 rep movsq
27614+ pax_force_retaddr
27615 ret
27616 CFI_ENDPROC
27617 ENDPROC(copy_page_rep)
27618@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
27619
27620 ENTRY(copy_page)
27621 CFI_STARTPROC
27622- subq $2*8, %rsp
27623- CFI_ADJUST_CFA_OFFSET 2*8
27624+ subq $3*8, %rsp
27625+ CFI_ADJUST_CFA_OFFSET 3*8
27626 movq %rbx, (%rsp)
27627 CFI_REL_OFFSET rbx, 0
27628 movq %r12, 1*8(%rsp)
27629 CFI_REL_OFFSET r12, 1*8
27630+ movq %r13, 2*8(%rsp)
27631+ CFI_REL_OFFSET r13, 2*8
27632
27633 movl $(4096/64)-5, %ecx
27634 .p2align 4
27635@@ -36,7 +39,7 @@ ENTRY(copy_page)
27636 movq 0x8*2(%rsi), %rdx
27637 movq 0x8*3(%rsi), %r8
27638 movq 0x8*4(%rsi), %r9
27639- movq 0x8*5(%rsi), %r10
27640+ movq 0x8*5(%rsi), %r13
27641 movq 0x8*6(%rsi), %r11
27642 movq 0x8*7(%rsi), %r12
27643
27644@@ -47,7 +50,7 @@ ENTRY(copy_page)
27645 movq %rdx, 0x8*2(%rdi)
27646 movq %r8, 0x8*3(%rdi)
27647 movq %r9, 0x8*4(%rdi)
27648- movq %r10, 0x8*5(%rdi)
27649+ movq %r13, 0x8*5(%rdi)
27650 movq %r11, 0x8*6(%rdi)
27651 movq %r12, 0x8*7(%rdi)
27652
27653@@ -66,7 +69,7 @@ ENTRY(copy_page)
27654 movq 0x8*2(%rsi), %rdx
27655 movq 0x8*3(%rsi), %r8
27656 movq 0x8*4(%rsi), %r9
27657- movq 0x8*5(%rsi), %r10
27658+ movq 0x8*5(%rsi), %r13
27659 movq 0x8*6(%rsi), %r11
27660 movq 0x8*7(%rsi), %r12
27661
27662@@ -75,7 +78,7 @@ ENTRY(copy_page)
27663 movq %rdx, 0x8*2(%rdi)
27664 movq %r8, 0x8*3(%rdi)
27665 movq %r9, 0x8*4(%rdi)
27666- movq %r10, 0x8*5(%rdi)
27667+ movq %r13, 0x8*5(%rdi)
27668 movq %r11, 0x8*6(%rdi)
27669 movq %r12, 0x8*7(%rdi)
27670
27671@@ -87,8 +90,11 @@ ENTRY(copy_page)
27672 CFI_RESTORE rbx
27673 movq 1*8(%rsp), %r12
27674 CFI_RESTORE r12
27675- addq $2*8, %rsp
27676- CFI_ADJUST_CFA_OFFSET -2*8
27677+ movq 2*8(%rsp), %r13
27678+ CFI_RESTORE r13
27679+ addq $3*8, %rsp
27680+ CFI_ADJUST_CFA_OFFSET -3*8
27681+ pax_force_retaddr
27682 ret
27683 .Lcopy_page_end:
27684 CFI_ENDPROC
27685@@ -99,7 +105,7 @@ ENDPROC(copy_page)
27686
27687 #include <asm/cpufeature.h>
27688
27689- .section .altinstr_replacement,"ax"
27690+ .section .altinstr_replacement,"a"
27691 1: .byte 0xeb /* jmp <disp8> */
27692 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
27693 2:
27694diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
27695index a30ca15..6b3f4e1 100644
27696--- a/arch/x86/lib/copy_user_64.S
27697+++ b/arch/x86/lib/copy_user_64.S
27698@@ -18,31 +18,7 @@
27699 #include <asm/alternative-asm.h>
27700 #include <asm/asm.h>
27701 #include <asm/smap.h>
27702-
27703-/*
27704- * By placing feature2 after feature1 in altinstructions section, we logically
27705- * implement:
27706- * If CPU has feature2, jmp to alt2 is used
27707- * else if CPU has feature1, jmp to alt1 is used
27708- * else jmp to orig is used.
27709- */
27710- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
27711-0:
27712- .byte 0xe9 /* 32bit jump */
27713- .long \orig-1f /* by default jump to orig */
27714-1:
27715- .section .altinstr_replacement,"ax"
27716-2: .byte 0xe9 /* near jump with 32bit immediate */
27717- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
27718-3: .byte 0xe9 /* near jump with 32bit immediate */
27719- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
27720- .previous
27721-
27722- .section .altinstructions,"a"
27723- altinstruction_entry 0b,2b,\feature1,5,5
27724- altinstruction_entry 0b,3b,\feature2,5,5
27725- .previous
27726- .endm
27727+#include <asm/pgtable.h>
27728
27729 .macro ALIGN_DESTINATION
27730 #ifdef FIX_ALIGNMENT
27731@@ -70,52 +46,6 @@
27732 #endif
27733 .endm
27734
27735-/* Standard copy_to_user with segment limit checking */
27736-ENTRY(_copy_to_user)
27737- CFI_STARTPROC
27738- GET_THREAD_INFO(%rax)
27739- movq %rdi,%rcx
27740- addq %rdx,%rcx
27741- jc bad_to_user
27742- cmpq TI_addr_limit(%rax),%rcx
27743- ja bad_to_user
27744- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
27745- copy_user_generic_unrolled,copy_user_generic_string, \
27746- copy_user_enhanced_fast_string
27747- CFI_ENDPROC
27748-ENDPROC(_copy_to_user)
27749-
27750-/* Standard copy_from_user with segment limit checking */
27751-ENTRY(_copy_from_user)
27752- CFI_STARTPROC
27753- GET_THREAD_INFO(%rax)
27754- movq %rsi,%rcx
27755- addq %rdx,%rcx
27756- jc bad_from_user
27757- cmpq TI_addr_limit(%rax),%rcx
27758- ja bad_from_user
27759- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
27760- copy_user_generic_unrolled,copy_user_generic_string, \
27761- copy_user_enhanced_fast_string
27762- CFI_ENDPROC
27763-ENDPROC(_copy_from_user)
27764-
27765- .section .fixup,"ax"
27766- /* must zero dest */
27767-ENTRY(bad_from_user)
27768-bad_from_user:
27769- CFI_STARTPROC
27770- movl %edx,%ecx
27771- xorl %eax,%eax
27772- rep
27773- stosb
27774-bad_to_user:
27775- movl %edx,%eax
27776- ret
27777- CFI_ENDPROC
27778-ENDPROC(bad_from_user)
27779- .previous
27780-
27781 /*
27782 * copy_user_generic_unrolled - memory copy with exception handling.
27783 * This version is for CPUs like P4 that don't have efficient micro
27784@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
27785 */
27786 ENTRY(copy_user_generic_unrolled)
27787 CFI_STARTPROC
27788+ ASM_PAX_OPEN_USERLAND
27789 ASM_STAC
27790 cmpl $8,%edx
27791 jb 20f /* less then 8 bytes, go to byte copy loop */
27792@@ -141,19 +72,19 @@ ENTRY(copy_user_generic_unrolled)
27793 jz 17f
27794 1: movq (%rsi),%r8
27795 2: movq 1*8(%rsi),%r9
27796-3: movq 2*8(%rsi),%r10
27797+3: movq 2*8(%rsi),%rax
27798 4: movq 3*8(%rsi),%r11
27799 5: movq %r8,(%rdi)
27800 6: movq %r9,1*8(%rdi)
27801-7: movq %r10,2*8(%rdi)
27802+7: movq %rax,2*8(%rdi)
27803 8: movq %r11,3*8(%rdi)
27804 9: movq 4*8(%rsi),%r8
27805 10: movq 5*8(%rsi),%r9
27806-11: movq 6*8(%rsi),%r10
27807+11: movq 6*8(%rsi),%rax
27808 12: movq 7*8(%rsi),%r11
27809 13: movq %r8,4*8(%rdi)
27810 14: movq %r9,5*8(%rdi)
27811-15: movq %r10,6*8(%rdi)
27812+15: movq %rax,6*8(%rdi)
27813 16: movq %r11,7*8(%rdi)
27814 leaq 64(%rsi),%rsi
27815 leaq 64(%rdi),%rdi
27816@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
27817 jnz 21b
27818 23: xor %eax,%eax
27819 ASM_CLAC
27820+ ASM_PAX_CLOSE_USERLAND
27821+ pax_force_retaddr
27822 ret
27823
27824 .section .fixup,"ax"
27825@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
27826 */
27827 ENTRY(copy_user_generic_string)
27828 CFI_STARTPROC
27829+ ASM_PAX_OPEN_USERLAND
27830 ASM_STAC
27831 andl %edx,%edx
27832 jz 4f
27833@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
27834 movsb
27835 4: xorl %eax,%eax
27836 ASM_CLAC
27837+ ASM_PAX_CLOSE_USERLAND
27838+ pax_force_retaddr
27839 ret
27840
27841 .section .fixup,"ax"
27842@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
27843 */
27844 ENTRY(copy_user_enhanced_fast_string)
27845 CFI_STARTPROC
27846+ ASM_PAX_OPEN_USERLAND
27847 ASM_STAC
27848 andl %edx,%edx
27849 jz 2f
27850@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
27851 movsb
27852 2: xorl %eax,%eax
27853 ASM_CLAC
27854+ ASM_PAX_CLOSE_USERLAND
27855+ pax_force_retaddr
27856 ret
27857
27858 .section .fixup,"ax"
27859diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
27860index 6a4f43c..55d26f2 100644
27861--- a/arch/x86/lib/copy_user_nocache_64.S
27862+++ b/arch/x86/lib/copy_user_nocache_64.S
27863@@ -8,6 +8,7 @@
27864
27865 #include <linux/linkage.h>
27866 #include <asm/dwarf2.h>
27867+#include <asm/alternative-asm.h>
27868
27869 #define FIX_ALIGNMENT 1
27870
27871@@ -16,6 +17,7 @@
27872 #include <asm/thread_info.h>
27873 #include <asm/asm.h>
27874 #include <asm/smap.h>
27875+#include <asm/pgtable.h>
27876
27877 .macro ALIGN_DESTINATION
27878 #ifdef FIX_ALIGNMENT
27879@@ -49,6 +51,16 @@
27880 */
27881 ENTRY(__copy_user_nocache)
27882 CFI_STARTPROC
27883+
27884+#ifdef CONFIG_PAX_MEMORY_UDEREF
27885+ mov pax_user_shadow_base,%rcx
27886+ cmp %rcx,%rsi
27887+ jae 1f
27888+ add %rcx,%rsi
27889+1:
27890+#endif
27891+
27892+ ASM_PAX_OPEN_USERLAND
27893 ASM_STAC
27894 cmpl $8,%edx
27895 jb 20f /* less then 8 bytes, go to byte copy loop */
27896@@ -59,19 +71,19 @@ ENTRY(__copy_user_nocache)
27897 jz 17f
27898 1: movq (%rsi),%r8
27899 2: movq 1*8(%rsi),%r9
27900-3: movq 2*8(%rsi),%r10
27901+3: movq 2*8(%rsi),%rax
27902 4: movq 3*8(%rsi),%r11
27903 5: movnti %r8,(%rdi)
27904 6: movnti %r9,1*8(%rdi)
27905-7: movnti %r10,2*8(%rdi)
27906+7: movnti %rax,2*8(%rdi)
27907 8: movnti %r11,3*8(%rdi)
27908 9: movq 4*8(%rsi),%r8
27909 10: movq 5*8(%rsi),%r9
27910-11: movq 6*8(%rsi),%r10
27911+11: movq 6*8(%rsi),%rax
27912 12: movq 7*8(%rsi),%r11
27913 13: movnti %r8,4*8(%rdi)
27914 14: movnti %r9,5*8(%rdi)
27915-15: movnti %r10,6*8(%rdi)
27916+15: movnti %rax,6*8(%rdi)
27917 16: movnti %r11,7*8(%rdi)
27918 leaq 64(%rsi),%rsi
27919 leaq 64(%rdi),%rdi
27920@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
27921 jnz 21b
27922 23: xorl %eax,%eax
27923 ASM_CLAC
27924+ ASM_PAX_CLOSE_USERLAND
27925 sfence
27926+ pax_force_retaddr
27927 ret
27928
27929 .section .fixup,"ax"
27930diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
27931index 2419d5f..953ee51 100644
27932--- a/arch/x86/lib/csum-copy_64.S
27933+++ b/arch/x86/lib/csum-copy_64.S
27934@@ -9,6 +9,7 @@
27935 #include <asm/dwarf2.h>
27936 #include <asm/errno.h>
27937 #include <asm/asm.h>
27938+#include <asm/alternative-asm.h>
27939
27940 /*
27941 * Checksum copy with exception handling.
27942@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
27943 CFI_RESTORE rbp
27944 addq $7*8, %rsp
27945 CFI_ADJUST_CFA_OFFSET -7*8
27946+ pax_force_retaddr 0, 1
27947 ret
27948 CFI_RESTORE_STATE
27949
27950diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
27951index 25b7ae8..c40113e 100644
27952--- a/arch/x86/lib/csum-wrappers_64.c
27953+++ b/arch/x86/lib/csum-wrappers_64.c
27954@@ -52,8 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
27955 len -= 2;
27956 }
27957 }
27958- isum = csum_partial_copy_generic((__force const void *)src,
27959+ pax_open_userland();
27960+ stac();
27961+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
27962 dst, len, isum, errp, NULL);
27963+ clac();
27964+ pax_close_userland();
27965 if (unlikely(*errp))
27966 goto out_err;
27967
27968@@ -105,8 +109,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
27969 }
27970
27971 *errp = 0;
27972- return csum_partial_copy_generic(src, (void __force *)dst,
27973+ pax_open_userland();
27974+ stac();
27975+ isum = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
27976 len, isum, NULL, errp);
27977+ clac();
27978+ pax_close_userland();
27979+ return isum;
27980 }
27981 EXPORT_SYMBOL(csum_partial_copy_to_user);
27982
27983diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
27984index a451235..1daa956 100644
27985--- a/arch/x86/lib/getuser.S
27986+++ b/arch/x86/lib/getuser.S
27987@@ -33,17 +33,40 @@
27988 #include <asm/thread_info.h>
27989 #include <asm/asm.h>
27990 #include <asm/smap.h>
27991+#include <asm/segment.h>
27992+#include <asm/pgtable.h>
27993+#include <asm/alternative-asm.h>
27994+
27995+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27996+#define __copyuser_seg gs;
27997+#else
27998+#define __copyuser_seg
27999+#endif
28000
28001 .text
28002 ENTRY(__get_user_1)
28003 CFI_STARTPROC
28004+
28005+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28006 GET_THREAD_INFO(%_ASM_DX)
28007 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28008 jae bad_get_user
28009 ASM_STAC
28010-1: movzbl (%_ASM_AX),%edx
28011+
28012+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28013+ mov pax_user_shadow_base,%_ASM_DX
28014+ cmp %_ASM_DX,%_ASM_AX
28015+ jae 1234f
28016+ add %_ASM_DX,%_ASM_AX
28017+1234:
28018+#endif
28019+
28020+#endif
28021+
28022+1: __copyuser_seg movzbl (%_ASM_AX),%edx
28023 xor %eax,%eax
28024 ASM_CLAC
28025+ pax_force_retaddr
28026 ret
28027 CFI_ENDPROC
28028 ENDPROC(__get_user_1)
28029@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
28030 ENTRY(__get_user_2)
28031 CFI_STARTPROC
28032 add $1,%_ASM_AX
28033+
28034+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28035 jc bad_get_user
28036 GET_THREAD_INFO(%_ASM_DX)
28037 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28038 jae bad_get_user
28039 ASM_STAC
28040-2: movzwl -1(%_ASM_AX),%edx
28041+
28042+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28043+ mov pax_user_shadow_base,%_ASM_DX
28044+ cmp %_ASM_DX,%_ASM_AX
28045+ jae 1234f
28046+ add %_ASM_DX,%_ASM_AX
28047+1234:
28048+#endif
28049+
28050+#endif
28051+
28052+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
28053 xor %eax,%eax
28054 ASM_CLAC
28055+ pax_force_retaddr
28056 ret
28057 CFI_ENDPROC
28058 ENDPROC(__get_user_2)
28059@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
28060 ENTRY(__get_user_4)
28061 CFI_STARTPROC
28062 add $3,%_ASM_AX
28063+
28064+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28065 jc bad_get_user
28066 GET_THREAD_INFO(%_ASM_DX)
28067 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28068 jae bad_get_user
28069 ASM_STAC
28070-3: movl -3(%_ASM_AX),%edx
28071+
28072+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28073+ mov pax_user_shadow_base,%_ASM_DX
28074+ cmp %_ASM_DX,%_ASM_AX
28075+ jae 1234f
28076+ add %_ASM_DX,%_ASM_AX
28077+1234:
28078+#endif
28079+
28080+#endif
28081+
28082+3: __copyuser_seg movl -3(%_ASM_AX),%edx
28083 xor %eax,%eax
28084 ASM_CLAC
28085+ pax_force_retaddr
28086 ret
28087 CFI_ENDPROC
28088 ENDPROC(__get_user_4)
28089@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
28090 GET_THREAD_INFO(%_ASM_DX)
28091 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28092 jae bad_get_user
28093+
28094+#ifdef CONFIG_PAX_MEMORY_UDEREF
28095+ mov pax_user_shadow_base,%_ASM_DX
28096+ cmp %_ASM_DX,%_ASM_AX
28097+ jae 1234f
28098+ add %_ASM_DX,%_ASM_AX
28099+1234:
28100+#endif
28101+
28102 ASM_STAC
28103 4: movq -7(%_ASM_AX),%rdx
28104 xor %eax,%eax
28105 ASM_CLAC
28106+ pax_force_retaddr
28107 ret
28108 #else
28109 add $7,%_ASM_AX
28110@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
28111 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28112 jae bad_get_user_8
28113 ASM_STAC
28114-4: movl -7(%_ASM_AX),%edx
28115-5: movl -3(%_ASM_AX),%ecx
28116+4: __copyuser_seg movl -7(%_ASM_AX),%edx
28117+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
28118 xor %eax,%eax
28119 ASM_CLAC
28120+ pax_force_retaddr
28121 ret
28122 #endif
28123 CFI_ENDPROC
28124@@ -113,6 +175,7 @@ bad_get_user:
28125 xor %edx,%edx
28126 mov $(-EFAULT),%_ASM_AX
28127 ASM_CLAC
28128+ pax_force_retaddr
28129 ret
28130 CFI_ENDPROC
28131 END(bad_get_user)
28132@@ -124,6 +187,7 @@ bad_get_user_8:
28133 xor %ecx,%ecx
28134 mov $(-EFAULT),%_ASM_AX
28135 ASM_CLAC
28136+ pax_force_retaddr
28137 ret
28138 CFI_ENDPROC
28139 END(bad_get_user_8)
28140diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
28141index 54fcffe..7be149e 100644
28142--- a/arch/x86/lib/insn.c
28143+++ b/arch/x86/lib/insn.c
28144@@ -20,8 +20,10 @@
28145
28146 #ifdef __KERNEL__
28147 #include <linux/string.h>
28148+#include <asm/pgtable_types.h>
28149 #else
28150 #include <string.h>
28151+#define ktla_ktva(addr) addr
28152 #endif
28153 #include <asm/inat.h>
28154 #include <asm/insn.h>
28155@@ -53,8 +55,8 @@
28156 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
28157 {
28158 memset(insn, 0, sizeof(*insn));
28159- insn->kaddr = kaddr;
28160- insn->next_byte = kaddr;
28161+ insn->kaddr = ktla_ktva(kaddr);
28162+ insn->next_byte = ktla_ktva(kaddr);
28163 insn->x86_64 = x86_64 ? 1 : 0;
28164 insn->opnd_bytes = 4;
28165 if (x86_64)
28166diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
28167index 05a95e7..326f2fa 100644
28168--- a/arch/x86/lib/iomap_copy_64.S
28169+++ b/arch/x86/lib/iomap_copy_64.S
28170@@ -17,6 +17,7 @@
28171
28172 #include <linux/linkage.h>
28173 #include <asm/dwarf2.h>
28174+#include <asm/alternative-asm.h>
28175
28176 /*
28177 * override generic version in lib/iomap_copy.c
28178@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
28179 CFI_STARTPROC
28180 movl %edx,%ecx
28181 rep movsd
28182+ pax_force_retaddr
28183 ret
28184 CFI_ENDPROC
28185 ENDPROC(__iowrite32_copy)
28186diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
28187index 56313a3..9b59269 100644
28188--- a/arch/x86/lib/memcpy_64.S
28189+++ b/arch/x86/lib/memcpy_64.S
28190@@ -24,7 +24,7 @@
28191 * This gets patched over the unrolled variant (below) via the
28192 * alternative instructions framework:
28193 */
28194- .section .altinstr_replacement, "ax", @progbits
28195+ .section .altinstr_replacement, "a", @progbits
28196 .Lmemcpy_c:
28197 movq %rdi, %rax
28198 movq %rdx, %rcx
28199@@ -33,6 +33,7 @@
28200 rep movsq
28201 movl %edx, %ecx
28202 rep movsb
28203+ pax_force_retaddr
28204 ret
28205 .Lmemcpy_e:
28206 .previous
28207@@ -44,11 +45,12 @@
28208 * This gets patched over the unrolled variant (below) via the
28209 * alternative instructions framework:
28210 */
28211- .section .altinstr_replacement, "ax", @progbits
28212+ .section .altinstr_replacement, "a", @progbits
28213 .Lmemcpy_c_e:
28214 movq %rdi, %rax
28215 movq %rdx, %rcx
28216 rep movsb
28217+ pax_force_retaddr
28218 ret
28219 .Lmemcpy_e_e:
28220 .previous
28221@@ -76,13 +78,13 @@ ENTRY(memcpy)
28222 */
28223 movq 0*8(%rsi), %r8
28224 movq 1*8(%rsi), %r9
28225- movq 2*8(%rsi), %r10
28226+ movq 2*8(%rsi), %rcx
28227 movq 3*8(%rsi), %r11
28228 leaq 4*8(%rsi), %rsi
28229
28230 movq %r8, 0*8(%rdi)
28231 movq %r9, 1*8(%rdi)
28232- movq %r10, 2*8(%rdi)
28233+ movq %rcx, 2*8(%rdi)
28234 movq %r11, 3*8(%rdi)
28235 leaq 4*8(%rdi), %rdi
28236 jae .Lcopy_forward_loop
28237@@ -105,12 +107,12 @@ ENTRY(memcpy)
28238 subq $0x20, %rdx
28239 movq -1*8(%rsi), %r8
28240 movq -2*8(%rsi), %r9
28241- movq -3*8(%rsi), %r10
28242+ movq -3*8(%rsi), %rcx
28243 movq -4*8(%rsi), %r11
28244 leaq -4*8(%rsi), %rsi
28245 movq %r8, -1*8(%rdi)
28246 movq %r9, -2*8(%rdi)
28247- movq %r10, -3*8(%rdi)
28248+ movq %rcx, -3*8(%rdi)
28249 movq %r11, -4*8(%rdi)
28250 leaq -4*8(%rdi), %rdi
28251 jae .Lcopy_backward_loop
28252@@ -130,12 +132,13 @@ ENTRY(memcpy)
28253 */
28254 movq 0*8(%rsi), %r8
28255 movq 1*8(%rsi), %r9
28256- movq -2*8(%rsi, %rdx), %r10
28257+ movq -2*8(%rsi, %rdx), %rcx
28258 movq -1*8(%rsi, %rdx), %r11
28259 movq %r8, 0*8(%rdi)
28260 movq %r9, 1*8(%rdi)
28261- movq %r10, -2*8(%rdi, %rdx)
28262+ movq %rcx, -2*8(%rdi, %rdx)
28263 movq %r11, -1*8(%rdi, %rdx)
28264+ pax_force_retaddr
28265 retq
28266 .p2align 4
28267 .Lless_16bytes:
28268@@ -148,6 +151,7 @@ ENTRY(memcpy)
28269 movq -1*8(%rsi, %rdx), %r9
28270 movq %r8, 0*8(%rdi)
28271 movq %r9, -1*8(%rdi, %rdx)
28272+ pax_force_retaddr
28273 retq
28274 .p2align 4
28275 .Lless_8bytes:
28276@@ -161,6 +165,7 @@ ENTRY(memcpy)
28277 movl -4(%rsi, %rdx), %r8d
28278 movl %ecx, (%rdi)
28279 movl %r8d, -4(%rdi, %rdx)
28280+ pax_force_retaddr
28281 retq
28282 .p2align 4
28283 .Lless_3bytes:
28284@@ -179,6 +184,7 @@ ENTRY(memcpy)
28285 movb %cl, (%rdi)
28286
28287 .Lend:
28288+ pax_force_retaddr
28289 retq
28290 CFI_ENDPROC
28291 ENDPROC(memcpy)
28292diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
28293index 65268a6..5aa7815 100644
28294--- a/arch/x86/lib/memmove_64.S
28295+++ b/arch/x86/lib/memmove_64.S
28296@@ -61,13 +61,13 @@ ENTRY(memmove)
28297 5:
28298 sub $0x20, %rdx
28299 movq 0*8(%rsi), %r11
28300- movq 1*8(%rsi), %r10
28301+ movq 1*8(%rsi), %rcx
28302 movq 2*8(%rsi), %r9
28303 movq 3*8(%rsi), %r8
28304 leaq 4*8(%rsi), %rsi
28305
28306 movq %r11, 0*8(%rdi)
28307- movq %r10, 1*8(%rdi)
28308+ movq %rcx, 1*8(%rdi)
28309 movq %r9, 2*8(%rdi)
28310 movq %r8, 3*8(%rdi)
28311 leaq 4*8(%rdi), %rdi
28312@@ -81,10 +81,10 @@ ENTRY(memmove)
28313 4:
28314 movq %rdx, %rcx
28315 movq -8(%rsi, %rdx), %r11
28316- lea -8(%rdi, %rdx), %r10
28317+ lea -8(%rdi, %rdx), %r9
28318 shrq $3, %rcx
28319 rep movsq
28320- movq %r11, (%r10)
28321+ movq %r11, (%r9)
28322 jmp 13f
28323 .Lmemmove_end_forward:
28324
28325@@ -95,14 +95,14 @@ ENTRY(memmove)
28326 7:
28327 movq %rdx, %rcx
28328 movq (%rsi), %r11
28329- movq %rdi, %r10
28330+ movq %rdi, %r9
28331 leaq -8(%rsi, %rdx), %rsi
28332 leaq -8(%rdi, %rdx), %rdi
28333 shrq $3, %rcx
28334 std
28335 rep movsq
28336 cld
28337- movq %r11, (%r10)
28338+ movq %r11, (%r9)
28339 jmp 13f
28340
28341 /*
28342@@ -127,13 +127,13 @@ ENTRY(memmove)
28343 8:
28344 subq $0x20, %rdx
28345 movq -1*8(%rsi), %r11
28346- movq -2*8(%rsi), %r10
28347+ movq -2*8(%rsi), %rcx
28348 movq -3*8(%rsi), %r9
28349 movq -4*8(%rsi), %r8
28350 leaq -4*8(%rsi), %rsi
28351
28352 movq %r11, -1*8(%rdi)
28353- movq %r10, -2*8(%rdi)
28354+ movq %rcx, -2*8(%rdi)
28355 movq %r9, -3*8(%rdi)
28356 movq %r8, -4*8(%rdi)
28357 leaq -4*8(%rdi), %rdi
28358@@ -151,11 +151,11 @@ ENTRY(memmove)
28359 * Move data from 16 bytes to 31 bytes.
28360 */
28361 movq 0*8(%rsi), %r11
28362- movq 1*8(%rsi), %r10
28363+ movq 1*8(%rsi), %rcx
28364 movq -2*8(%rsi, %rdx), %r9
28365 movq -1*8(%rsi, %rdx), %r8
28366 movq %r11, 0*8(%rdi)
28367- movq %r10, 1*8(%rdi)
28368+ movq %rcx, 1*8(%rdi)
28369 movq %r9, -2*8(%rdi, %rdx)
28370 movq %r8, -1*8(%rdi, %rdx)
28371 jmp 13f
28372@@ -167,9 +167,9 @@ ENTRY(memmove)
28373 * Move data from 8 bytes to 15 bytes.
28374 */
28375 movq 0*8(%rsi), %r11
28376- movq -1*8(%rsi, %rdx), %r10
28377+ movq -1*8(%rsi, %rdx), %r9
28378 movq %r11, 0*8(%rdi)
28379- movq %r10, -1*8(%rdi, %rdx)
28380+ movq %r9, -1*8(%rdi, %rdx)
28381 jmp 13f
28382 10:
28383 cmpq $4, %rdx
28384@@ -178,9 +178,9 @@ ENTRY(memmove)
28385 * Move data from 4 bytes to 7 bytes.
28386 */
28387 movl (%rsi), %r11d
28388- movl -4(%rsi, %rdx), %r10d
28389+ movl -4(%rsi, %rdx), %r9d
28390 movl %r11d, (%rdi)
28391- movl %r10d, -4(%rdi, %rdx)
28392+ movl %r9d, -4(%rdi, %rdx)
28393 jmp 13f
28394 11:
28395 cmp $2, %rdx
28396@@ -189,9 +189,9 @@ ENTRY(memmove)
28397 * Move data from 2 bytes to 3 bytes.
28398 */
28399 movw (%rsi), %r11w
28400- movw -2(%rsi, %rdx), %r10w
28401+ movw -2(%rsi, %rdx), %r9w
28402 movw %r11w, (%rdi)
28403- movw %r10w, -2(%rdi, %rdx)
28404+ movw %r9w, -2(%rdi, %rdx)
28405 jmp 13f
28406 12:
28407 cmp $1, %rdx
28408@@ -202,14 +202,16 @@ ENTRY(memmove)
28409 movb (%rsi), %r11b
28410 movb %r11b, (%rdi)
28411 13:
28412+ pax_force_retaddr
28413 retq
28414 CFI_ENDPROC
28415
28416- .section .altinstr_replacement,"ax"
28417+ .section .altinstr_replacement,"a"
28418 .Lmemmove_begin_forward_efs:
28419 /* Forward moving data. */
28420 movq %rdx, %rcx
28421 rep movsb
28422+ pax_force_retaddr
28423 retq
28424 .Lmemmove_end_forward_efs:
28425 .previous
28426diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
28427index 2dcb380..50a78bc 100644
28428--- a/arch/x86/lib/memset_64.S
28429+++ b/arch/x86/lib/memset_64.S
28430@@ -16,7 +16,7 @@
28431 *
28432 * rax original destination
28433 */
28434- .section .altinstr_replacement, "ax", @progbits
28435+ .section .altinstr_replacement, "a", @progbits
28436 .Lmemset_c:
28437 movq %rdi,%r9
28438 movq %rdx,%rcx
28439@@ -30,6 +30,7 @@
28440 movl %edx,%ecx
28441 rep stosb
28442 movq %r9,%rax
28443+ pax_force_retaddr
28444 ret
28445 .Lmemset_e:
28446 .previous
28447@@ -45,13 +46,14 @@
28448 *
28449 * rax original destination
28450 */
28451- .section .altinstr_replacement, "ax", @progbits
28452+ .section .altinstr_replacement, "a", @progbits
28453 .Lmemset_c_e:
28454 movq %rdi,%r9
28455 movb %sil,%al
28456 movq %rdx,%rcx
28457 rep stosb
28458 movq %r9,%rax
28459+ pax_force_retaddr
28460 ret
28461 .Lmemset_e_e:
28462 .previous
28463@@ -59,7 +61,7 @@
28464 ENTRY(memset)
28465 ENTRY(__memset)
28466 CFI_STARTPROC
28467- movq %rdi,%r10
28468+ movq %rdi,%r11
28469
28470 /* expand byte value */
28471 movzbl %sil,%ecx
28472@@ -117,7 +119,8 @@ ENTRY(__memset)
28473 jnz .Lloop_1
28474
28475 .Lende:
28476- movq %r10,%rax
28477+ movq %r11,%rax
28478+ pax_force_retaddr
28479 ret
28480
28481 CFI_RESTORE_STATE
28482diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
28483index c9f2d9b..e7fd2c0 100644
28484--- a/arch/x86/lib/mmx_32.c
28485+++ b/arch/x86/lib/mmx_32.c
28486@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
28487 {
28488 void *p;
28489 int i;
28490+ unsigned long cr0;
28491
28492 if (unlikely(in_interrupt()))
28493 return __memcpy(to, from, len);
28494@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
28495 kernel_fpu_begin();
28496
28497 __asm__ __volatile__ (
28498- "1: prefetch (%0)\n" /* This set is 28 bytes */
28499- " prefetch 64(%0)\n"
28500- " prefetch 128(%0)\n"
28501- " prefetch 192(%0)\n"
28502- " prefetch 256(%0)\n"
28503+ "1: prefetch (%1)\n" /* This set is 28 bytes */
28504+ " prefetch 64(%1)\n"
28505+ " prefetch 128(%1)\n"
28506+ " prefetch 192(%1)\n"
28507+ " prefetch 256(%1)\n"
28508 "2: \n"
28509 ".section .fixup, \"ax\"\n"
28510- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28511+ "3: \n"
28512+
28513+#ifdef CONFIG_PAX_KERNEXEC
28514+ " movl %%cr0, %0\n"
28515+ " movl %0, %%eax\n"
28516+ " andl $0xFFFEFFFF, %%eax\n"
28517+ " movl %%eax, %%cr0\n"
28518+#endif
28519+
28520+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28521+
28522+#ifdef CONFIG_PAX_KERNEXEC
28523+ " movl %0, %%cr0\n"
28524+#endif
28525+
28526 " jmp 2b\n"
28527 ".previous\n"
28528 _ASM_EXTABLE(1b, 3b)
28529- : : "r" (from));
28530+ : "=&r" (cr0) : "r" (from) : "ax");
28531
28532 for ( ; i > 5; i--) {
28533 __asm__ __volatile__ (
28534- "1: prefetch 320(%0)\n"
28535- "2: movq (%0), %%mm0\n"
28536- " movq 8(%0), %%mm1\n"
28537- " movq 16(%0), %%mm2\n"
28538- " movq 24(%0), %%mm3\n"
28539- " movq %%mm0, (%1)\n"
28540- " movq %%mm1, 8(%1)\n"
28541- " movq %%mm2, 16(%1)\n"
28542- " movq %%mm3, 24(%1)\n"
28543- " movq 32(%0), %%mm0\n"
28544- " movq 40(%0), %%mm1\n"
28545- " movq 48(%0), %%mm2\n"
28546- " movq 56(%0), %%mm3\n"
28547- " movq %%mm0, 32(%1)\n"
28548- " movq %%mm1, 40(%1)\n"
28549- " movq %%mm2, 48(%1)\n"
28550- " movq %%mm3, 56(%1)\n"
28551+ "1: prefetch 320(%1)\n"
28552+ "2: movq (%1), %%mm0\n"
28553+ " movq 8(%1), %%mm1\n"
28554+ " movq 16(%1), %%mm2\n"
28555+ " movq 24(%1), %%mm3\n"
28556+ " movq %%mm0, (%2)\n"
28557+ " movq %%mm1, 8(%2)\n"
28558+ " movq %%mm2, 16(%2)\n"
28559+ " movq %%mm3, 24(%2)\n"
28560+ " movq 32(%1), %%mm0\n"
28561+ " movq 40(%1), %%mm1\n"
28562+ " movq 48(%1), %%mm2\n"
28563+ " movq 56(%1), %%mm3\n"
28564+ " movq %%mm0, 32(%2)\n"
28565+ " movq %%mm1, 40(%2)\n"
28566+ " movq %%mm2, 48(%2)\n"
28567+ " movq %%mm3, 56(%2)\n"
28568 ".section .fixup, \"ax\"\n"
28569- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28570+ "3:\n"
28571+
28572+#ifdef CONFIG_PAX_KERNEXEC
28573+ " movl %%cr0, %0\n"
28574+ " movl %0, %%eax\n"
28575+ " andl $0xFFFEFFFF, %%eax\n"
28576+ " movl %%eax, %%cr0\n"
28577+#endif
28578+
28579+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28580+
28581+#ifdef CONFIG_PAX_KERNEXEC
28582+ " movl %0, %%cr0\n"
28583+#endif
28584+
28585 " jmp 2b\n"
28586 ".previous\n"
28587 _ASM_EXTABLE(1b, 3b)
28588- : : "r" (from), "r" (to) : "memory");
28589+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28590
28591 from += 64;
28592 to += 64;
28593@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
28594 static void fast_copy_page(void *to, void *from)
28595 {
28596 int i;
28597+ unsigned long cr0;
28598
28599 kernel_fpu_begin();
28600
28601@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
28602 * but that is for later. -AV
28603 */
28604 __asm__ __volatile__(
28605- "1: prefetch (%0)\n"
28606- " prefetch 64(%0)\n"
28607- " prefetch 128(%0)\n"
28608- " prefetch 192(%0)\n"
28609- " prefetch 256(%0)\n"
28610+ "1: prefetch (%1)\n"
28611+ " prefetch 64(%1)\n"
28612+ " prefetch 128(%1)\n"
28613+ " prefetch 192(%1)\n"
28614+ " prefetch 256(%1)\n"
28615 "2: \n"
28616 ".section .fixup, \"ax\"\n"
28617- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28618+ "3: \n"
28619+
28620+#ifdef CONFIG_PAX_KERNEXEC
28621+ " movl %%cr0, %0\n"
28622+ " movl %0, %%eax\n"
28623+ " andl $0xFFFEFFFF, %%eax\n"
28624+ " movl %%eax, %%cr0\n"
28625+#endif
28626+
28627+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28628+
28629+#ifdef CONFIG_PAX_KERNEXEC
28630+ " movl %0, %%cr0\n"
28631+#endif
28632+
28633 " jmp 2b\n"
28634 ".previous\n"
28635- _ASM_EXTABLE(1b, 3b) : : "r" (from));
28636+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
28637
28638 for (i = 0; i < (4096-320)/64; i++) {
28639 __asm__ __volatile__ (
28640- "1: prefetch 320(%0)\n"
28641- "2: movq (%0), %%mm0\n"
28642- " movntq %%mm0, (%1)\n"
28643- " movq 8(%0), %%mm1\n"
28644- " movntq %%mm1, 8(%1)\n"
28645- " movq 16(%0), %%mm2\n"
28646- " movntq %%mm2, 16(%1)\n"
28647- " movq 24(%0), %%mm3\n"
28648- " movntq %%mm3, 24(%1)\n"
28649- " movq 32(%0), %%mm4\n"
28650- " movntq %%mm4, 32(%1)\n"
28651- " movq 40(%0), %%mm5\n"
28652- " movntq %%mm5, 40(%1)\n"
28653- " movq 48(%0), %%mm6\n"
28654- " movntq %%mm6, 48(%1)\n"
28655- " movq 56(%0), %%mm7\n"
28656- " movntq %%mm7, 56(%1)\n"
28657+ "1: prefetch 320(%1)\n"
28658+ "2: movq (%1), %%mm0\n"
28659+ " movntq %%mm0, (%2)\n"
28660+ " movq 8(%1), %%mm1\n"
28661+ " movntq %%mm1, 8(%2)\n"
28662+ " movq 16(%1), %%mm2\n"
28663+ " movntq %%mm2, 16(%2)\n"
28664+ " movq 24(%1), %%mm3\n"
28665+ " movntq %%mm3, 24(%2)\n"
28666+ " movq 32(%1), %%mm4\n"
28667+ " movntq %%mm4, 32(%2)\n"
28668+ " movq 40(%1), %%mm5\n"
28669+ " movntq %%mm5, 40(%2)\n"
28670+ " movq 48(%1), %%mm6\n"
28671+ " movntq %%mm6, 48(%2)\n"
28672+ " movq 56(%1), %%mm7\n"
28673+ " movntq %%mm7, 56(%2)\n"
28674 ".section .fixup, \"ax\"\n"
28675- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28676+ "3:\n"
28677+
28678+#ifdef CONFIG_PAX_KERNEXEC
28679+ " movl %%cr0, %0\n"
28680+ " movl %0, %%eax\n"
28681+ " andl $0xFFFEFFFF, %%eax\n"
28682+ " movl %%eax, %%cr0\n"
28683+#endif
28684+
28685+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28686+
28687+#ifdef CONFIG_PAX_KERNEXEC
28688+ " movl %0, %%cr0\n"
28689+#endif
28690+
28691 " jmp 2b\n"
28692 ".previous\n"
28693- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
28694+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28695
28696 from += 64;
28697 to += 64;
28698@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
28699 static void fast_copy_page(void *to, void *from)
28700 {
28701 int i;
28702+ unsigned long cr0;
28703
28704 kernel_fpu_begin();
28705
28706 __asm__ __volatile__ (
28707- "1: prefetch (%0)\n"
28708- " prefetch 64(%0)\n"
28709- " prefetch 128(%0)\n"
28710- " prefetch 192(%0)\n"
28711- " prefetch 256(%0)\n"
28712+ "1: prefetch (%1)\n"
28713+ " prefetch 64(%1)\n"
28714+ " prefetch 128(%1)\n"
28715+ " prefetch 192(%1)\n"
28716+ " prefetch 256(%1)\n"
28717 "2: \n"
28718 ".section .fixup, \"ax\"\n"
28719- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28720+ "3: \n"
28721+
28722+#ifdef CONFIG_PAX_KERNEXEC
28723+ " movl %%cr0, %0\n"
28724+ " movl %0, %%eax\n"
28725+ " andl $0xFFFEFFFF, %%eax\n"
28726+ " movl %%eax, %%cr0\n"
28727+#endif
28728+
28729+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28730+
28731+#ifdef CONFIG_PAX_KERNEXEC
28732+ " movl %0, %%cr0\n"
28733+#endif
28734+
28735 " jmp 2b\n"
28736 ".previous\n"
28737- _ASM_EXTABLE(1b, 3b) : : "r" (from));
28738+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
28739
28740 for (i = 0; i < 4096/64; i++) {
28741 __asm__ __volatile__ (
28742- "1: prefetch 320(%0)\n"
28743- "2: movq (%0), %%mm0\n"
28744- " movq 8(%0), %%mm1\n"
28745- " movq 16(%0), %%mm2\n"
28746- " movq 24(%0), %%mm3\n"
28747- " movq %%mm0, (%1)\n"
28748- " movq %%mm1, 8(%1)\n"
28749- " movq %%mm2, 16(%1)\n"
28750- " movq %%mm3, 24(%1)\n"
28751- " movq 32(%0), %%mm0\n"
28752- " movq 40(%0), %%mm1\n"
28753- " movq 48(%0), %%mm2\n"
28754- " movq 56(%0), %%mm3\n"
28755- " movq %%mm0, 32(%1)\n"
28756- " movq %%mm1, 40(%1)\n"
28757- " movq %%mm2, 48(%1)\n"
28758- " movq %%mm3, 56(%1)\n"
28759+ "1: prefetch 320(%1)\n"
28760+ "2: movq (%1), %%mm0\n"
28761+ " movq 8(%1), %%mm1\n"
28762+ " movq 16(%1), %%mm2\n"
28763+ " movq 24(%1), %%mm3\n"
28764+ " movq %%mm0, (%2)\n"
28765+ " movq %%mm1, 8(%2)\n"
28766+ " movq %%mm2, 16(%2)\n"
28767+ " movq %%mm3, 24(%2)\n"
28768+ " movq 32(%1), %%mm0\n"
28769+ " movq 40(%1), %%mm1\n"
28770+ " movq 48(%1), %%mm2\n"
28771+ " movq 56(%1), %%mm3\n"
28772+ " movq %%mm0, 32(%2)\n"
28773+ " movq %%mm1, 40(%2)\n"
28774+ " movq %%mm2, 48(%2)\n"
28775+ " movq %%mm3, 56(%2)\n"
28776 ".section .fixup, \"ax\"\n"
28777- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28778+ "3:\n"
28779+
28780+#ifdef CONFIG_PAX_KERNEXEC
28781+ " movl %%cr0, %0\n"
28782+ " movl %0, %%eax\n"
28783+ " andl $0xFFFEFFFF, %%eax\n"
28784+ " movl %%eax, %%cr0\n"
28785+#endif
28786+
28787+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28788+
28789+#ifdef CONFIG_PAX_KERNEXEC
28790+ " movl %0, %%cr0\n"
28791+#endif
28792+
28793 " jmp 2b\n"
28794 ".previous\n"
28795 _ASM_EXTABLE(1b, 3b)
28796- : : "r" (from), "r" (to) : "memory");
28797+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28798
28799 from += 64;
28800 to += 64;
28801diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
28802index f6d13ee..aca5f0b 100644
28803--- a/arch/x86/lib/msr-reg.S
28804+++ b/arch/x86/lib/msr-reg.S
28805@@ -3,6 +3,7 @@
28806 #include <asm/dwarf2.h>
28807 #include <asm/asm.h>
28808 #include <asm/msr.h>
28809+#include <asm/alternative-asm.h>
28810
28811 #ifdef CONFIG_X86_64
28812 /*
28813@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
28814 CFI_STARTPROC
28815 pushq_cfi %rbx
28816 pushq_cfi %rbp
28817- movq %rdi, %r10 /* Save pointer */
28818+ movq %rdi, %r9 /* Save pointer */
28819 xorl %r11d, %r11d /* Return value */
28820 movl (%rdi), %eax
28821 movl 4(%rdi), %ecx
28822@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
28823 movl 28(%rdi), %edi
28824 CFI_REMEMBER_STATE
28825 1: \op
28826-2: movl %eax, (%r10)
28827+2: movl %eax, (%r9)
28828 movl %r11d, %eax /* Return value */
28829- movl %ecx, 4(%r10)
28830- movl %edx, 8(%r10)
28831- movl %ebx, 12(%r10)
28832- movl %ebp, 20(%r10)
28833- movl %esi, 24(%r10)
28834- movl %edi, 28(%r10)
28835+ movl %ecx, 4(%r9)
28836+ movl %edx, 8(%r9)
28837+ movl %ebx, 12(%r9)
28838+ movl %ebp, 20(%r9)
28839+ movl %esi, 24(%r9)
28840+ movl %edi, 28(%r9)
28841 popq_cfi %rbp
28842 popq_cfi %rbx
28843+ pax_force_retaddr
28844 ret
28845 3:
28846 CFI_RESTORE_STATE
28847diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
28848index fc6ba17..d4d989d 100644
28849--- a/arch/x86/lib/putuser.S
28850+++ b/arch/x86/lib/putuser.S
28851@@ -16,7 +16,9 @@
28852 #include <asm/errno.h>
28853 #include <asm/asm.h>
28854 #include <asm/smap.h>
28855-
28856+#include <asm/segment.h>
28857+#include <asm/pgtable.h>
28858+#include <asm/alternative-asm.h>
28859
28860 /*
28861 * __put_user_X
28862@@ -30,57 +32,125 @@
28863 * as they get called from within inline assembly.
28864 */
28865
28866-#define ENTER CFI_STARTPROC ; \
28867- GET_THREAD_INFO(%_ASM_BX)
28868-#define EXIT ASM_CLAC ; \
28869- ret ; \
28870+#define ENTER CFI_STARTPROC
28871+#define EXIT ASM_CLAC ; \
28872+ pax_force_retaddr ; \
28873+ ret ; \
28874 CFI_ENDPROC
28875
28876+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28877+#define _DEST %_ASM_CX,%_ASM_BX
28878+#else
28879+#define _DEST %_ASM_CX
28880+#endif
28881+
28882+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28883+#define __copyuser_seg gs;
28884+#else
28885+#define __copyuser_seg
28886+#endif
28887+
28888 .text
28889 ENTRY(__put_user_1)
28890 ENTER
28891+
28892+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28893+ GET_THREAD_INFO(%_ASM_BX)
28894 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
28895 jae bad_put_user
28896 ASM_STAC
28897-1: movb %al,(%_ASM_CX)
28898+
28899+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28900+ mov pax_user_shadow_base,%_ASM_BX
28901+ cmp %_ASM_BX,%_ASM_CX
28902+ jb 1234f
28903+ xor %ebx,%ebx
28904+1234:
28905+#endif
28906+
28907+#endif
28908+
28909+1: __copyuser_seg movb %al,(_DEST)
28910 xor %eax,%eax
28911 EXIT
28912 ENDPROC(__put_user_1)
28913
28914 ENTRY(__put_user_2)
28915 ENTER
28916+
28917+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28918+ GET_THREAD_INFO(%_ASM_BX)
28919 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28920 sub $1,%_ASM_BX
28921 cmp %_ASM_BX,%_ASM_CX
28922 jae bad_put_user
28923 ASM_STAC
28924-2: movw %ax,(%_ASM_CX)
28925+
28926+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28927+ mov pax_user_shadow_base,%_ASM_BX
28928+ cmp %_ASM_BX,%_ASM_CX
28929+ jb 1234f
28930+ xor %ebx,%ebx
28931+1234:
28932+#endif
28933+
28934+#endif
28935+
28936+2: __copyuser_seg movw %ax,(_DEST)
28937 xor %eax,%eax
28938 EXIT
28939 ENDPROC(__put_user_2)
28940
28941 ENTRY(__put_user_4)
28942 ENTER
28943+
28944+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28945+ GET_THREAD_INFO(%_ASM_BX)
28946 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28947 sub $3,%_ASM_BX
28948 cmp %_ASM_BX,%_ASM_CX
28949 jae bad_put_user
28950 ASM_STAC
28951-3: movl %eax,(%_ASM_CX)
28952+
28953+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28954+ mov pax_user_shadow_base,%_ASM_BX
28955+ cmp %_ASM_BX,%_ASM_CX
28956+ jb 1234f
28957+ xor %ebx,%ebx
28958+1234:
28959+#endif
28960+
28961+#endif
28962+
28963+3: __copyuser_seg movl %eax,(_DEST)
28964 xor %eax,%eax
28965 EXIT
28966 ENDPROC(__put_user_4)
28967
28968 ENTRY(__put_user_8)
28969 ENTER
28970+
28971+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28972+ GET_THREAD_INFO(%_ASM_BX)
28973 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28974 sub $7,%_ASM_BX
28975 cmp %_ASM_BX,%_ASM_CX
28976 jae bad_put_user
28977 ASM_STAC
28978-4: mov %_ASM_AX,(%_ASM_CX)
28979+
28980+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28981+ mov pax_user_shadow_base,%_ASM_BX
28982+ cmp %_ASM_BX,%_ASM_CX
28983+ jb 1234f
28984+ xor %ebx,%ebx
28985+1234:
28986+#endif
28987+
28988+#endif
28989+
28990+4: __copyuser_seg mov %_ASM_AX,(_DEST)
28991 #ifdef CONFIG_X86_32
28992-5: movl %edx,4(%_ASM_CX)
28993+5: __copyuser_seg movl %edx,4(_DEST)
28994 #endif
28995 xor %eax,%eax
28996 EXIT
28997diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
28998index 1cad221..de671ee 100644
28999--- a/arch/x86/lib/rwlock.S
29000+++ b/arch/x86/lib/rwlock.S
29001@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
29002 FRAME
29003 0: LOCK_PREFIX
29004 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29005+
29006+#ifdef CONFIG_PAX_REFCOUNT
29007+ jno 1234f
29008+ LOCK_PREFIX
29009+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29010+ int $4
29011+1234:
29012+ _ASM_EXTABLE(1234b, 1234b)
29013+#endif
29014+
29015 1: rep; nop
29016 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
29017 jne 1b
29018 LOCK_PREFIX
29019 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29020+
29021+#ifdef CONFIG_PAX_REFCOUNT
29022+ jno 1234f
29023+ LOCK_PREFIX
29024+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29025+ int $4
29026+1234:
29027+ _ASM_EXTABLE(1234b, 1234b)
29028+#endif
29029+
29030 jnz 0b
29031 ENDFRAME
29032+ pax_force_retaddr
29033 ret
29034 CFI_ENDPROC
29035 END(__write_lock_failed)
29036@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
29037 FRAME
29038 0: LOCK_PREFIX
29039 READ_LOCK_SIZE(inc) (%__lock_ptr)
29040+
29041+#ifdef CONFIG_PAX_REFCOUNT
29042+ jno 1234f
29043+ LOCK_PREFIX
29044+ READ_LOCK_SIZE(dec) (%__lock_ptr)
29045+ int $4
29046+1234:
29047+ _ASM_EXTABLE(1234b, 1234b)
29048+#endif
29049+
29050 1: rep; nop
29051 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
29052 js 1b
29053 LOCK_PREFIX
29054 READ_LOCK_SIZE(dec) (%__lock_ptr)
29055+
29056+#ifdef CONFIG_PAX_REFCOUNT
29057+ jno 1234f
29058+ LOCK_PREFIX
29059+ READ_LOCK_SIZE(inc) (%__lock_ptr)
29060+ int $4
29061+1234:
29062+ _ASM_EXTABLE(1234b, 1234b)
29063+#endif
29064+
29065 js 0b
29066 ENDFRAME
29067+ pax_force_retaddr
29068 ret
29069 CFI_ENDPROC
29070 END(__read_lock_failed)
29071diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
29072index 5dff5f0..cadebf4 100644
29073--- a/arch/x86/lib/rwsem.S
29074+++ b/arch/x86/lib/rwsem.S
29075@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
29076 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29077 CFI_RESTORE __ASM_REG(dx)
29078 restore_common_regs
29079+ pax_force_retaddr
29080 ret
29081 CFI_ENDPROC
29082 ENDPROC(call_rwsem_down_read_failed)
29083@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
29084 movq %rax,%rdi
29085 call rwsem_down_write_failed
29086 restore_common_regs
29087+ pax_force_retaddr
29088 ret
29089 CFI_ENDPROC
29090 ENDPROC(call_rwsem_down_write_failed)
29091@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
29092 movq %rax,%rdi
29093 call rwsem_wake
29094 restore_common_regs
29095-1: ret
29096+1: pax_force_retaddr
29097+ ret
29098 CFI_ENDPROC
29099 ENDPROC(call_rwsem_wake)
29100
29101@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
29102 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29103 CFI_RESTORE __ASM_REG(dx)
29104 restore_common_regs
29105+ pax_force_retaddr
29106 ret
29107 CFI_ENDPROC
29108 ENDPROC(call_rwsem_downgrade_wake)
29109diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
29110index a63efd6..ccecad8 100644
29111--- a/arch/x86/lib/thunk_64.S
29112+++ b/arch/x86/lib/thunk_64.S
29113@@ -8,6 +8,7 @@
29114 #include <linux/linkage.h>
29115 #include <asm/dwarf2.h>
29116 #include <asm/calling.h>
29117+#include <asm/alternative-asm.h>
29118
29119 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
29120 .macro THUNK name, func, put_ret_addr_in_rdi=0
29121@@ -41,5 +42,6 @@
29122 SAVE_ARGS
29123 restore:
29124 RESTORE_ARGS
29125+ pax_force_retaddr
29126 ret
29127 CFI_ENDPROC
29128diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
29129index 3eb18ac..6890bc3 100644
29130--- a/arch/x86/lib/usercopy_32.c
29131+++ b/arch/x86/lib/usercopy_32.c
29132@@ -42,11 +42,13 @@ do { \
29133 int __d0; \
29134 might_fault(); \
29135 __asm__ __volatile__( \
29136+ __COPYUSER_SET_ES \
29137 ASM_STAC "\n" \
29138 "0: rep; stosl\n" \
29139 " movl %2,%0\n" \
29140 "1: rep; stosb\n" \
29141 "2: " ASM_CLAC "\n" \
29142+ __COPYUSER_RESTORE_ES \
29143 ".section .fixup,\"ax\"\n" \
29144 "3: lea 0(%2,%0,4),%0\n" \
29145 " jmp 2b\n" \
29146@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
29147
29148 #ifdef CONFIG_X86_INTEL_USERCOPY
29149 static unsigned long
29150-__copy_user_intel(void __user *to, const void *from, unsigned long size)
29151+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
29152 {
29153 int d0, d1;
29154 __asm__ __volatile__(
29155@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29156 " .align 2,0x90\n"
29157 "3: movl 0(%4), %%eax\n"
29158 "4: movl 4(%4), %%edx\n"
29159- "5: movl %%eax, 0(%3)\n"
29160- "6: movl %%edx, 4(%3)\n"
29161+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
29162+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
29163 "7: movl 8(%4), %%eax\n"
29164 "8: movl 12(%4),%%edx\n"
29165- "9: movl %%eax, 8(%3)\n"
29166- "10: movl %%edx, 12(%3)\n"
29167+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
29168+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
29169 "11: movl 16(%4), %%eax\n"
29170 "12: movl 20(%4), %%edx\n"
29171- "13: movl %%eax, 16(%3)\n"
29172- "14: movl %%edx, 20(%3)\n"
29173+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
29174+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
29175 "15: movl 24(%4), %%eax\n"
29176 "16: movl 28(%4), %%edx\n"
29177- "17: movl %%eax, 24(%3)\n"
29178- "18: movl %%edx, 28(%3)\n"
29179+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
29180+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
29181 "19: movl 32(%4), %%eax\n"
29182 "20: movl 36(%4), %%edx\n"
29183- "21: movl %%eax, 32(%3)\n"
29184- "22: movl %%edx, 36(%3)\n"
29185+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
29186+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
29187 "23: movl 40(%4), %%eax\n"
29188 "24: movl 44(%4), %%edx\n"
29189- "25: movl %%eax, 40(%3)\n"
29190- "26: movl %%edx, 44(%3)\n"
29191+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
29192+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
29193 "27: movl 48(%4), %%eax\n"
29194 "28: movl 52(%4), %%edx\n"
29195- "29: movl %%eax, 48(%3)\n"
29196- "30: movl %%edx, 52(%3)\n"
29197+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
29198+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
29199 "31: movl 56(%4), %%eax\n"
29200 "32: movl 60(%4), %%edx\n"
29201- "33: movl %%eax, 56(%3)\n"
29202- "34: movl %%edx, 60(%3)\n"
29203+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
29204+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
29205 " addl $-64, %0\n"
29206 " addl $64, %4\n"
29207 " addl $64, %3\n"
29208@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29209 " shrl $2, %0\n"
29210 " andl $3, %%eax\n"
29211 " cld\n"
29212+ __COPYUSER_SET_ES
29213 "99: rep; movsl\n"
29214 "36: movl %%eax, %0\n"
29215 "37: rep; movsb\n"
29216 "100:\n"
29217+ __COPYUSER_RESTORE_ES
29218 ".section .fixup,\"ax\"\n"
29219 "101: lea 0(%%eax,%0,4),%0\n"
29220 " jmp 100b\n"
29221@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29222 }
29223
29224 static unsigned long
29225+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
29226+{
29227+ int d0, d1;
29228+ __asm__ __volatile__(
29229+ " .align 2,0x90\n"
29230+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
29231+ " cmpl $67, %0\n"
29232+ " jbe 3f\n"
29233+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
29234+ " .align 2,0x90\n"
29235+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
29236+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
29237+ "5: movl %%eax, 0(%3)\n"
29238+ "6: movl %%edx, 4(%3)\n"
29239+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
29240+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
29241+ "9: movl %%eax, 8(%3)\n"
29242+ "10: movl %%edx, 12(%3)\n"
29243+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
29244+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
29245+ "13: movl %%eax, 16(%3)\n"
29246+ "14: movl %%edx, 20(%3)\n"
29247+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
29248+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
29249+ "17: movl %%eax, 24(%3)\n"
29250+ "18: movl %%edx, 28(%3)\n"
29251+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
29252+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
29253+ "21: movl %%eax, 32(%3)\n"
29254+ "22: movl %%edx, 36(%3)\n"
29255+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
29256+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
29257+ "25: movl %%eax, 40(%3)\n"
29258+ "26: movl %%edx, 44(%3)\n"
29259+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
29260+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
29261+ "29: movl %%eax, 48(%3)\n"
29262+ "30: movl %%edx, 52(%3)\n"
29263+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
29264+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
29265+ "33: movl %%eax, 56(%3)\n"
29266+ "34: movl %%edx, 60(%3)\n"
29267+ " addl $-64, %0\n"
29268+ " addl $64, %4\n"
29269+ " addl $64, %3\n"
29270+ " cmpl $63, %0\n"
29271+ " ja 1b\n"
29272+ "35: movl %0, %%eax\n"
29273+ " shrl $2, %0\n"
29274+ " andl $3, %%eax\n"
29275+ " cld\n"
29276+ "99: rep; "__copyuser_seg" movsl\n"
29277+ "36: movl %%eax, %0\n"
29278+ "37: rep; "__copyuser_seg" movsb\n"
29279+ "100:\n"
29280+ ".section .fixup,\"ax\"\n"
29281+ "101: lea 0(%%eax,%0,4),%0\n"
29282+ " jmp 100b\n"
29283+ ".previous\n"
29284+ _ASM_EXTABLE(1b,100b)
29285+ _ASM_EXTABLE(2b,100b)
29286+ _ASM_EXTABLE(3b,100b)
29287+ _ASM_EXTABLE(4b,100b)
29288+ _ASM_EXTABLE(5b,100b)
29289+ _ASM_EXTABLE(6b,100b)
29290+ _ASM_EXTABLE(7b,100b)
29291+ _ASM_EXTABLE(8b,100b)
29292+ _ASM_EXTABLE(9b,100b)
29293+ _ASM_EXTABLE(10b,100b)
29294+ _ASM_EXTABLE(11b,100b)
29295+ _ASM_EXTABLE(12b,100b)
29296+ _ASM_EXTABLE(13b,100b)
29297+ _ASM_EXTABLE(14b,100b)
29298+ _ASM_EXTABLE(15b,100b)
29299+ _ASM_EXTABLE(16b,100b)
29300+ _ASM_EXTABLE(17b,100b)
29301+ _ASM_EXTABLE(18b,100b)
29302+ _ASM_EXTABLE(19b,100b)
29303+ _ASM_EXTABLE(20b,100b)
29304+ _ASM_EXTABLE(21b,100b)
29305+ _ASM_EXTABLE(22b,100b)
29306+ _ASM_EXTABLE(23b,100b)
29307+ _ASM_EXTABLE(24b,100b)
29308+ _ASM_EXTABLE(25b,100b)
29309+ _ASM_EXTABLE(26b,100b)
29310+ _ASM_EXTABLE(27b,100b)
29311+ _ASM_EXTABLE(28b,100b)
29312+ _ASM_EXTABLE(29b,100b)
29313+ _ASM_EXTABLE(30b,100b)
29314+ _ASM_EXTABLE(31b,100b)
29315+ _ASM_EXTABLE(32b,100b)
29316+ _ASM_EXTABLE(33b,100b)
29317+ _ASM_EXTABLE(34b,100b)
29318+ _ASM_EXTABLE(35b,100b)
29319+ _ASM_EXTABLE(36b,100b)
29320+ _ASM_EXTABLE(37b,100b)
29321+ _ASM_EXTABLE(99b,101b)
29322+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
29323+ : "1"(to), "2"(from), "0"(size)
29324+ : "eax", "edx", "memory");
29325+ return size;
29326+}
29327+
29328+static unsigned long __size_overflow(3)
29329 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29330 {
29331 int d0, d1;
29332 __asm__ __volatile__(
29333 " .align 2,0x90\n"
29334- "0: movl 32(%4), %%eax\n"
29335+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29336 " cmpl $67, %0\n"
29337 " jbe 2f\n"
29338- "1: movl 64(%4), %%eax\n"
29339+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29340 " .align 2,0x90\n"
29341- "2: movl 0(%4), %%eax\n"
29342- "21: movl 4(%4), %%edx\n"
29343+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29344+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29345 " movl %%eax, 0(%3)\n"
29346 " movl %%edx, 4(%3)\n"
29347- "3: movl 8(%4), %%eax\n"
29348- "31: movl 12(%4),%%edx\n"
29349+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29350+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29351 " movl %%eax, 8(%3)\n"
29352 " movl %%edx, 12(%3)\n"
29353- "4: movl 16(%4), %%eax\n"
29354- "41: movl 20(%4), %%edx\n"
29355+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29356+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29357 " movl %%eax, 16(%3)\n"
29358 " movl %%edx, 20(%3)\n"
29359- "10: movl 24(%4), %%eax\n"
29360- "51: movl 28(%4), %%edx\n"
29361+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29362+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29363 " movl %%eax, 24(%3)\n"
29364 " movl %%edx, 28(%3)\n"
29365- "11: movl 32(%4), %%eax\n"
29366- "61: movl 36(%4), %%edx\n"
29367+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29368+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29369 " movl %%eax, 32(%3)\n"
29370 " movl %%edx, 36(%3)\n"
29371- "12: movl 40(%4), %%eax\n"
29372- "71: movl 44(%4), %%edx\n"
29373+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29374+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29375 " movl %%eax, 40(%3)\n"
29376 " movl %%edx, 44(%3)\n"
29377- "13: movl 48(%4), %%eax\n"
29378- "81: movl 52(%4), %%edx\n"
29379+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29380+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29381 " movl %%eax, 48(%3)\n"
29382 " movl %%edx, 52(%3)\n"
29383- "14: movl 56(%4), %%eax\n"
29384- "91: movl 60(%4), %%edx\n"
29385+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29386+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29387 " movl %%eax, 56(%3)\n"
29388 " movl %%edx, 60(%3)\n"
29389 " addl $-64, %0\n"
29390@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29391 " shrl $2, %0\n"
29392 " andl $3, %%eax\n"
29393 " cld\n"
29394- "6: rep; movsl\n"
29395+ "6: rep; "__copyuser_seg" movsl\n"
29396 " movl %%eax,%0\n"
29397- "7: rep; movsb\n"
29398+ "7: rep; "__copyuser_seg" movsb\n"
29399 "8:\n"
29400 ".section .fixup,\"ax\"\n"
29401 "9: lea 0(%%eax,%0,4),%0\n"
29402@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29403 * hyoshiok@miraclelinux.com
29404 */
29405
29406-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29407+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
29408 const void __user *from, unsigned long size)
29409 {
29410 int d0, d1;
29411
29412 __asm__ __volatile__(
29413 " .align 2,0x90\n"
29414- "0: movl 32(%4), %%eax\n"
29415+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29416 " cmpl $67, %0\n"
29417 " jbe 2f\n"
29418- "1: movl 64(%4), %%eax\n"
29419+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29420 " .align 2,0x90\n"
29421- "2: movl 0(%4), %%eax\n"
29422- "21: movl 4(%4), %%edx\n"
29423+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29424+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29425 " movnti %%eax, 0(%3)\n"
29426 " movnti %%edx, 4(%3)\n"
29427- "3: movl 8(%4), %%eax\n"
29428- "31: movl 12(%4),%%edx\n"
29429+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29430+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29431 " movnti %%eax, 8(%3)\n"
29432 " movnti %%edx, 12(%3)\n"
29433- "4: movl 16(%4), %%eax\n"
29434- "41: movl 20(%4), %%edx\n"
29435+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29436+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29437 " movnti %%eax, 16(%3)\n"
29438 " movnti %%edx, 20(%3)\n"
29439- "10: movl 24(%4), %%eax\n"
29440- "51: movl 28(%4), %%edx\n"
29441+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29442+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29443 " movnti %%eax, 24(%3)\n"
29444 " movnti %%edx, 28(%3)\n"
29445- "11: movl 32(%4), %%eax\n"
29446- "61: movl 36(%4), %%edx\n"
29447+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29448+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29449 " movnti %%eax, 32(%3)\n"
29450 " movnti %%edx, 36(%3)\n"
29451- "12: movl 40(%4), %%eax\n"
29452- "71: movl 44(%4), %%edx\n"
29453+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29454+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29455 " movnti %%eax, 40(%3)\n"
29456 " movnti %%edx, 44(%3)\n"
29457- "13: movl 48(%4), %%eax\n"
29458- "81: movl 52(%4), %%edx\n"
29459+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29460+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29461 " movnti %%eax, 48(%3)\n"
29462 " movnti %%edx, 52(%3)\n"
29463- "14: movl 56(%4), %%eax\n"
29464- "91: movl 60(%4), %%edx\n"
29465+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29466+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29467 " movnti %%eax, 56(%3)\n"
29468 " movnti %%edx, 60(%3)\n"
29469 " addl $-64, %0\n"
29470@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29471 " shrl $2, %0\n"
29472 " andl $3, %%eax\n"
29473 " cld\n"
29474- "6: rep; movsl\n"
29475+ "6: rep; "__copyuser_seg" movsl\n"
29476 " movl %%eax,%0\n"
29477- "7: rep; movsb\n"
29478+ "7: rep; "__copyuser_seg" movsb\n"
29479 "8:\n"
29480 ".section .fixup,\"ax\"\n"
29481 "9: lea 0(%%eax,%0,4),%0\n"
29482@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29483 return size;
29484 }
29485
29486-static unsigned long __copy_user_intel_nocache(void *to,
29487+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
29488 const void __user *from, unsigned long size)
29489 {
29490 int d0, d1;
29491
29492 __asm__ __volatile__(
29493 " .align 2,0x90\n"
29494- "0: movl 32(%4), %%eax\n"
29495+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29496 " cmpl $67, %0\n"
29497 " jbe 2f\n"
29498- "1: movl 64(%4), %%eax\n"
29499+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29500 " .align 2,0x90\n"
29501- "2: movl 0(%4), %%eax\n"
29502- "21: movl 4(%4), %%edx\n"
29503+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29504+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29505 " movnti %%eax, 0(%3)\n"
29506 " movnti %%edx, 4(%3)\n"
29507- "3: movl 8(%4), %%eax\n"
29508- "31: movl 12(%4),%%edx\n"
29509+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29510+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29511 " movnti %%eax, 8(%3)\n"
29512 " movnti %%edx, 12(%3)\n"
29513- "4: movl 16(%4), %%eax\n"
29514- "41: movl 20(%4), %%edx\n"
29515+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29516+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29517 " movnti %%eax, 16(%3)\n"
29518 " movnti %%edx, 20(%3)\n"
29519- "10: movl 24(%4), %%eax\n"
29520- "51: movl 28(%4), %%edx\n"
29521+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29522+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29523 " movnti %%eax, 24(%3)\n"
29524 " movnti %%edx, 28(%3)\n"
29525- "11: movl 32(%4), %%eax\n"
29526- "61: movl 36(%4), %%edx\n"
29527+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29528+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29529 " movnti %%eax, 32(%3)\n"
29530 " movnti %%edx, 36(%3)\n"
29531- "12: movl 40(%4), %%eax\n"
29532- "71: movl 44(%4), %%edx\n"
29533+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29534+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29535 " movnti %%eax, 40(%3)\n"
29536 " movnti %%edx, 44(%3)\n"
29537- "13: movl 48(%4), %%eax\n"
29538- "81: movl 52(%4), %%edx\n"
29539+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29540+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29541 " movnti %%eax, 48(%3)\n"
29542 " movnti %%edx, 52(%3)\n"
29543- "14: movl 56(%4), %%eax\n"
29544- "91: movl 60(%4), %%edx\n"
29545+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29546+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29547 " movnti %%eax, 56(%3)\n"
29548 " movnti %%edx, 60(%3)\n"
29549 " addl $-64, %0\n"
29550@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
29551 " shrl $2, %0\n"
29552 " andl $3, %%eax\n"
29553 " cld\n"
29554- "6: rep; movsl\n"
29555+ "6: rep; "__copyuser_seg" movsl\n"
29556 " movl %%eax,%0\n"
29557- "7: rep; movsb\n"
29558+ "7: rep; "__copyuser_seg" movsb\n"
29559 "8:\n"
29560 ".section .fixup,\"ax\"\n"
29561 "9: lea 0(%%eax,%0,4),%0\n"
29562@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
29563 */
29564 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
29565 unsigned long size);
29566-unsigned long __copy_user_intel(void __user *to, const void *from,
29567+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
29568+ unsigned long size);
29569+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
29570 unsigned long size);
29571 unsigned long __copy_user_zeroing_intel_nocache(void *to,
29572 const void __user *from, unsigned long size);
29573 #endif /* CONFIG_X86_INTEL_USERCOPY */
29574
29575 /* Generic arbitrary sized copy. */
29576-#define __copy_user(to, from, size) \
29577+#define __copy_user(to, from, size, prefix, set, restore) \
29578 do { \
29579 int __d0, __d1, __d2; \
29580 __asm__ __volatile__( \
29581+ set \
29582 " cmp $7,%0\n" \
29583 " jbe 1f\n" \
29584 " movl %1,%0\n" \
29585 " negl %0\n" \
29586 " andl $7,%0\n" \
29587 " subl %0,%3\n" \
29588- "4: rep; movsb\n" \
29589+ "4: rep; "prefix"movsb\n" \
29590 " movl %3,%0\n" \
29591 " shrl $2,%0\n" \
29592 " andl $3,%3\n" \
29593 " .align 2,0x90\n" \
29594- "0: rep; movsl\n" \
29595+ "0: rep; "prefix"movsl\n" \
29596 " movl %3,%0\n" \
29597- "1: rep; movsb\n" \
29598+ "1: rep; "prefix"movsb\n" \
29599 "2:\n" \
29600+ restore \
29601 ".section .fixup,\"ax\"\n" \
29602 "5: addl %3,%0\n" \
29603 " jmp 2b\n" \
29604@@ -538,14 +650,14 @@ do { \
29605 " negl %0\n" \
29606 " andl $7,%0\n" \
29607 " subl %0,%3\n" \
29608- "4: rep; movsb\n" \
29609+ "4: rep; "__copyuser_seg"movsb\n" \
29610 " movl %3,%0\n" \
29611 " shrl $2,%0\n" \
29612 " andl $3,%3\n" \
29613 " .align 2,0x90\n" \
29614- "0: rep; movsl\n" \
29615+ "0: rep; "__copyuser_seg"movsl\n" \
29616 " movl %3,%0\n" \
29617- "1: rep; movsb\n" \
29618+ "1: rep; "__copyuser_seg"movsb\n" \
29619 "2:\n" \
29620 ".section .fixup,\"ax\"\n" \
29621 "5: addl %3,%0\n" \
29622@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
29623 {
29624 stac();
29625 if (movsl_is_ok(to, from, n))
29626- __copy_user(to, from, n);
29627+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
29628 else
29629- n = __copy_user_intel(to, from, n);
29630+ n = __generic_copy_to_user_intel(to, from, n);
29631 clac();
29632 return n;
29633 }
29634@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
29635 {
29636 stac();
29637 if (movsl_is_ok(to, from, n))
29638- __copy_user(to, from, n);
29639+ __copy_user(to, from, n, __copyuser_seg, "", "");
29640 else
29641- n = __copy_user_intel((void __user *)to,
29642- (const void *)from, n);
29643+ n = __generic_copy_from_user_intel(to, from, n);
29644 clac();
29645 return n;
29646 }
29647@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
29648 if (n > 64 && cpu_has_xmm2)
29649 n = __copy_user_intel_nocache(to, from, n);
29650 else
29651- __copy_user(to, from, n);
29652+ __copy_user(to, from, n, __copyuser_seg, "", "");
29653 #else
29654- __copy_user(to, from, n);
29655+ __copy_user(to, from, n, __copyuser_seg, "", "");
29656 #endif
29657 clac();
29658 return n;
29659 }
29660 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
29661
29662-/**
29663- * copy_to_user: - Copy a block of data into user space.
29664- * @to: Destination address, in user space.
29665- * @from: Source address, in kernel space.
29666- * @n: Number of bytes to copy.
29667- *
29668- * Context: User context only. This function may sleep.
29669- *
29670- * Copy data from kernel space to user space.
29671- *
29672- * Returns number of bytes that could not be copied.
29673- * On success, this will be zero.
29674- */
29675-unsigned long
29676-copy_to_user(void __user *to, const void *from, unsigned long n)
29677+#ifdef CONFIG_PAX_MEMORY_UDEREF
29678+void __set_fs(mm_segment_t x)
29679 {
29680- if (access_ok(VERIFY_WRITE, to, n))
29681- n = __copy_to_user(to, from, n);
29682- return n;
29683+ switch (x.seg) {
29684+ case 0:
29685+ loadsegment(gs, 0);
29686+ break;
29687+ case TASK_SIZE_MAX:
29688+ loadsegment(gs, __USER_DS);
29689+ break;
29690+ case -1UL:
29691+ loadsegment(gs, __KERNEL_DS);
29692+ break;
29693+ default:
29694+ BUG();
29695+ }
29696 }
29697-EXPORT_SYMBOL(copy_to_user);
29698+EXPORT_SYMBOL(__set_fs);
29699
29700-/**
29701- * copy_from_user: - Copy a block of data from user space.
29702- * @to: Destination address, in kernel space.
29703- * @from: Source address, in user space.
29704- * @n: Number of bytes to copy.
29705- *
29706- * Context: User context only. This function may sleep.
29707- *
29708- * Copy data from user space to kernel space.
29709- *
29710- * Returns number of bytes that could not be copied.
29711- * On success, this will be zero.
29712- *
29713- * If some data could not be copied, this function will pad the copied
29714- * data to the requested size using zero bytes.
29715- */
29716-unsigned long
29717-_copy_from_user(void *to, const void __user *from, unsigned long n)
29718+void set_fs(mm_segment_t x)
29719 {
29720- if (access_ok(VERIFY_READ, from, n))
29721- n = __copy_from_user(to, from, n);
29722- else
29723- memset(to, 0, n);
29724- return n;
29725+ current_thread_info()->addr_limit = x;
29726+ __set_fs(x);
29727 }
29728-EXPORT_SYMBOL(_copy_from_user);
29729+EXPORT_SYMBOL(set_fs);
29730+#endif
29731diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
29732index 906fea3..0194a18 100644
29733--- a/arch/x86/lib/usercopy_64.c
29734+++ b/arch/x86/lib/usercopy_64.c
29735@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
29736 might_fault();
29737 /* no memory constraint because it doesn't change any memory gcc knows
29738 about */
29739+ pax_open_userland();
29740 stac();
29741 asm volatile(
29742 " testq %[size8],%[size8]\n"
29743@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
29744 _ASM_EXTABLE(0b,3b)
29745 _ASM_EXTABLE(1b,2b)
29746 : [size8] "=&c"(size), [dst] "=&D" (__d0)
29747- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
29748+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
29749 [zero] "r" (0UL), [eight] "r" (8UL));
29750 clac();
29751+ pax_close_userland();
29752 return size;
29753 }
29754 EXPORT_SYMBOL(__clear_user);
29755@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
29756 }
29757 EXPORT_SYMBOL(clear_user);
29758
29759-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
29760+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
29761 {
29762- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
29763- return copy_user_generic((__force void *)to, (__force void *)from, len);
29764- }
29765- return len;
29766+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
29767+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
29768+ return len;
29769 }
29770 EXPORT_SYMBOL(copy_in_user);
29771
29772@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
29773 * it is not necessary to optimize tail handling.
29774 */
29775 unsigned long
29776-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
29777+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
29778 {
29779 char c;
29780 unsigned zero_len;
29781
29782+ clac();
29783+ pax_close_userland();
29784 for (; len; --len, to++) {
29785 if (__get_user_nocheck(c, from++, sizeof(char)))
29786 break;
29787@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
29788 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
29789 if (__put_user_nocheck(c, to++, sizeof(char)))
29790 break;
29791- clac();
29792 return len;
29793 }
29794diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
29795index 23d8e5f..9ccc13a 100644
29796--- a/arch/x86/mm/Makefile
29797+++ b/arch/x86/mm/Makefile
29798@@ -28,3 +28,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
29799 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
29800
29801 obj-$(CONFIG_MEMTEST) += memtest.o
29802+
29803+quote:="
29804+obj-$(CONFIG_X86_64) += uderef_64.o
29805+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
29806diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
29807index 903ec1e..c4166b2 100644
29808--- a/arch/x86/mm/extable.c
29809+++ b/arch/x86/mm/extable.c
29810@@ -6,12 +6,24 @@
29811 static inline unsigned long
29812 ex_insn_addr(const struct exception_table_entry *x)
29813 {
29814- return (unsigned long)&x->insn + x->insn;
29815+ unsigned long reloc = 0;
29816+
29817+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29818+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29819+#endif
29820+
29821+ return (unsigned long)&x->insn + x->insn + reloc;
29822 }
29823 static inline unsigned long
29824 ex_fixup_addr(const struct exception_table_entry *x)
29825 {
29826- return (unsigned long)&x->fixup + x->fixup;
29827+ unsigned long reloc = 0;
29828+
29829+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29830+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29831+#endif
29832+
29833+ return (unsigned long)&x->fixup + x->fixup + reloc;
29834 }
29835
29836 int fixup_exception(struct pt_regs *regs)
29837@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
29838 unsigned long new_ip;
29839
29840 #ifdef CONFIG_PNPBIOS
29841- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
29842+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
29843 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
29844 extern u32 pnp_bios_is_utter_crap;
29845 pnp_bios_is_utter_crap = 1;
29846@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
29847 i += 4;
29848 p->fixup -= i;
29849 i += 4;
29850+
29851+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29852+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
29853+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29854+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29855+#endif
29856+
29857 }
29858 }
29859
29860diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
29861index 654be4a..a4a3da1 100644
29862--- a/arch/x86/mm/fault.c
29863+++ b/arch/x86/mm/fault.c
29864@@ -14,11 +14,18 @@
29865 #include <linux/hugetlb.h> /* hstate_index_to_shift */
29866 #include <linux/prefetch.h> /* prefetchw */
29867 #include <linux/context_tracking.h> /* exception_enter(), ... */
29868+#include <linux/unistd.h>
29869+#include <linux/compiler.h>
29870
29871 #include <asm/traps.h> /* dotraplinkage, ... */
29872 #include <asm/pgalloc.h> /* pgd_*(), ... */
29873 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
29874 #include <asm/fixmap.h> /* VSYSCALL_START */
29875+#include <asm/tlbflush.h>
29876+
29877+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29878+#include <asm/stacktrace.h>
29879+#endif
29880
29881 /*
29882 * Page fault error code bits:
29883@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
29884 int ret = 0;
29885
29886 /* kprobe_running() needs smp_processor_id() */
29887- if (kprobes_built_in() && !user_mode_vm(regs)) {
29888+ if (kprobes_built_in() && !user_mode(regs)) {
29889 preempt_disable();
29890 if (kprobe_running() && kprobe_fault_handler(regs, 14))
29891 ret = 1;
29892@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
29893 return !instr_lo || (instr_lo>>1) == 1;
29894 case 0x00:
29895 /* Prefetch instruction is 0x0F0D or 0x0F18 */
29896- if (probe_kernel_address(instr, opcode))
29897+ if (user_mode(regs)) {
29898+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29899+ return 0;
29900+ } else if (probe_kernel_address(instr, opcode))
29901 return 0;
29902
29903 *prefetch = (instr_lo == 0xF) &&
29904@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
29905 while (instr < max_instr) {
29906 unsigned char opcode;
29907
29908- if (probe_kernel_address(instr, opcode))
29909+ if (user_mode(regs)) {
29910+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29911+ break;
29912+ } else if (probe_kernel_address(instr, opcode))
29913 break;
29914
29915 instr++;
29916@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
29917 force_sig_info(si_signo, &info, tsk);
29918 }
29919
29920+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29921+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
29922+#endif
29923+
29924+#ifdef CONFIG_PAX_EMUTRAMP
29925+static int pax_handle_fetch_fault(struct pt_regs *regs);
29926+#endif
29927+
29928+#ifdef CONFIG_PAX_PAGEEXEC
29929+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
29930+{
29931+ pgd_t *pgd;
29932+ pud_t *pud;
29933+ pmd_t *pmd;
29934+
29935+ pgd = pgd_offset(mm, address);
29936+ if (!pgd_present(*pgd))
29937+ return NULL;
29938+ pud = pud_offset(pgd, address);
29939+ if (!pud_present(*pud))
29940+ return NULL;
29941+ pmd = pmd_offset(pud, address);
29942+ if (!pmd_present(*pmd))
29943+ return NULL;
29944+ return pmd;
29945+}
29946+#endif
29947+
29948 DEFINE_SPINLOCK(pgd_lock);
29949 LIST_HEAD(pgd_list);
29950
29951@@ -232,10 +273,27 @@ void vmalloc_sync_all(void)
29952 for (address = VMALLOC_START & PMD_MASK;
29953 address >= TASK_SIZE && address < FIXADDR_TOP;
29954 address += PMD_SIZE) {
29955+
29956+#ifdef CONFIG_PAX_PER_CPU_PGD
29957+ unsigned long cpu;
29958+#else
29959 struct page *page;
29960+#endif
29961
29962 spin_lock(&pgd_lock);
29963+
29964+#ifdef CONFIG_PAX_PER_CPU_PGD
29965+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29966+ pgd_t *pgd = get_cpu_pgd(cpu, user);
29967+ pmd_t *ret;
29968+
29969+ ret = vmalloc_sync_one(pgd, address);
29970+ if (!ret)
29971+ break;
29972+ pgd = get_cpu_pgd(cpu, kernel);
29973+#else
29974 list_for_each_entry(page, &pgd_list, lru) {
29975+ pgd_t *pgd;
29976 spinlock_t *pgt_lock;
29977 pmd_t *ret;
29978
29979@@ -243,8 +301,14 @@ void vmalloc_sync_all(void)
29980 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
29981
29982 spin_lock(pgt_lock);
29983- ret = vmalloc_sync_one(page_address(page), address);
29984+ pgd = page_address(page);
29985+#endif
29986+
29987+ ret = vmalloc_sync_one(pgd, address);
29988+
29989+#ifndef CONFIG_PAX_PER_CPU_PGD
29990 spin_unlock(pgt_lock);
29991+#endif
29992
29993 if (!ret)
29994 break;
29995@@ -278,6 +342,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
29996 * an interrupt in the middle of a task switch..
29997 */
29998 pgd_paddr = read_cr3();
29999+
30000+#ifdef CONFIG_PAX_PER_CPU_PGD
30001+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
30002+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
30003+#endif
30004+
30005 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
30006 if (!pmd_k)
30007 return -1;
30008@@ -373,11 +443,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30009 * happen within a race in page table update. In the later
30010 * case just flush:
30011 */
30012- pgd = pgd_offset(current->active_mm, address);
30013+
30014 pgd_ref = pgd_offset_k(address);
30015 if (pgd_none(*pgd_ref))
30016 return -1;
30017
30018+#ifdef CONFIG_PAX_PER_CPU_PGD
30019+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
30020+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
30021+ if (pgd_none(*pgd)) {
30022+ set_pgd(pgd, *pgd_ref);
30023+ arch_flush_lazy_mmu_mode();
30024+ } else {
30025+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
30026+ }
30027+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
30028+#else
30029+ pgd = pgd_offset(current->active_mm, address);
30030+#endif
30031+
30032 if (pgd_none(*pgd)) {
30033 set_pgd(pgd, *pgd_ref);
30034 arch_flush_lazy_mmu_mode();
30035@@ -543,7 +627,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
30036 static int is_errata100(struct pt_regs *regs, unsigned long address)
30037 {
30038 #ifdef CONFIG_X86_64
30039- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
30040+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
30041 return 1;
30042 #endif
30043 return 0;
30044@@ -570,7 +654,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
30045 }
30046
30047 static const char nx_warning[] = KERN_CRIT
30048-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
30049+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
30050
30051 static void
30052 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30053@@ -579,15 +663,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30054 if (!oops_may_print())
30055 return;
30056
30057- if (error_code & PF_INSTR) {
30058+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
30059 unsigned int level;
30060
30061 pte_t *pte = lookup_address(address, &level);
30062
30063 if (pte && pte_present(*pte) && !pte_exec(*pte))
30064- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
30065+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
30066 }
30067
30068+#ifdef CONFIG_PAX_KERNEXEC
30069+ if (init_mm.start_code <= address && address < init_mm.end_code) {
30070+ if (current->signal->curr_ip)
30071+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
30072+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
30073+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30074+ else
30075+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
30076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30077+ }
30078+#endif
30079+
30080 printk(KERN_ALERT "BUG: unable to handle kernel ");
30081 if (address < PAGE_SIZE)
30082 printk(KERN_CONT "NULL pointer dereference");
30083@@ -750,6 +846,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
30084 return;
30085 }
30086 #endif
30087+
30088+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30089+ if (pax_is_fetch_fault(regs, error_code, address)) {
30090+
30091+#ifdef CONFIG_PAX_EMUTRAMP
30092+ switch (pax_handle_fetch_fault(regs)) {
30093+ case 2:
30094+ return;
30095+ }
30096+#endif
30097+
30098+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30099+ do_group_exit(SIGKILL);
30100+ }
30101+#endif
30102+
30103 /* Kernel addresses are always protection faults: */
30104 if (address >= TASK_SIZE)
30105 error_code |= PF_PROT;
30106@@ -835,7 +947,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
30107 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
30108 printk(KERN_ERR
30109 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
30110- tsk->comm, tsk->pid, address);
30111+ tsk->comm, task_pid_nr(tsk), address);
30112 code = BUS_MCEERR_AR;
30113 }
30114 #endif
30115@@ -898,6 +1010,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
30116 return 1;
30117 }
30118
30119+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30120+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
30121+{
30122+ pte_t *pte;
30123+ pmd_t *pmd;
30124+ spinlock_t *ptl;
30125+ unsigned char pte_mask;
30126+
30127+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
30128+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
30129+ return 0;
30130+
30131+ /* PaX: it's our fault, let's handle it if we can */
30132+
30133+ /* PaX: take a look at read faults before acquiring any locks */
30134+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
30135+ /* instruction fetch attempt from a protected page in user mode */
30136+ up_read(&mm->mmap_sem);
30137+
30138+#ifdef CONFIG_PAX_EMUTRAMP
30139+ switch (pax_handle_fetch_fault(regs)) {
30140+ case 2:
30141+ return 1;
30142+ }
30143+#endif
30144+
30145+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30146+ do_group_exit(SIGKILL);
30147+ }
30148+
30149+ pmd = pax_get_pmd(mm, address);
30150+ if (unlikely(!pmd))
30151+ return 0;
30152+
30153+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
30154+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
30155+ pte_unmap_unlock(pte, ptl);
30156+ return 0;
30157+ }
30158+
30159+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
30160+ /* write attempt to a protected page in user mode */
30161+ pte_unmap_unlock(pte, ptl);
30162+ return 0;
30163+ }
30164+
30165+#ifdef CONFIG_SMP
30166+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
30167+#else
30168+ if (likely(address > get_limit(regs->cs)))
30169+#endif
30170+ {
30171+ set_pte(pte, pte_mkread(*pte));
30172+ __flush_tlb_one(address);
30173+ pte_unmap_unlock(pte, ptl);
30174+ up_read(&mm->mmap_sem);
30175+ return 1;
30176+ }
30177+
30178+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
30179+
30180+ /*
30181+ * PaX: fill DTLB with user rights and retry
30182+ */
30183+ __asm__ __volatile__ (
30184+ "orb %2,(%1)\n"
30185+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
30186+/*
30187+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
30188+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
30189+ * page fault when examined during a TLB load attempt. this is true not only
30190+ * for PTEs holding a non-present entry but also present entries that will
30191+ * raise a page fault (such as those set up by PaX, or the copy-on-write
30192+ * mechanism). in effect it means that we do *not* need to flush the TLBs
30193+ * for our target pages since their PTEs are simply not in the TLBs at all.
30194+
30195+ * the best thing in omitting it is that we gain around 15-20% speed in the
30196+ * fast path of the page fault handler and can get rid of tracing since we
30197+ * can no longer flush unintended entries.
30198+ */
30199+ "invlpg (%0)\n"
30200+#endif
30201+ __copyuser_seg"testb $0,(%0)\n"
30202+ "xorb %3,(%1)\n"
30203+ :
30204+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
30205+ : "memory", "cc");
30206+ pte_unmap_unlock(pte, ptl);
30207+ up_read(&mm->mmap_sem);
30208+ return 1;
30209+}
30210+#endif
30211+
30212 /*
30213 * Handle a spurious fault caused by a stale TLB entry.
30214 *
30215@@ -964,6 +1169,9 @@ int show_unhandled_signals = 1;
30216 static inline int
30217 access_error(unsigned long error_code, struct vm_area_struct *vma)
30218 {
30219+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
30220+ return 1;
30221+
30222 if (error_code & PF_WRITE) {
30223 /* write, present and write, not present: */
30224 if (unlikely(!(vma->vm_flags & VM_WRITE)))
30225@@ -992,7 +1200,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
30226 if (error_code & PF_USER)
30227 return false;
30228
30229- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
30230+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
30231 return false;
30232
30233 return true;
30234@@ -1008,18 +1216,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30235 {
30236 struct vm_area_struct *vma;
30237 struct task_struct *tsk;
30238- unsigned long address;
30239 struct mm_struct *mm;
30240 int fault;
30241 int write = error_code & PF_WRITE;
30242 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
30243 (write ? FAULT_FLAG_WRITE : 0);
30244
30245- tsk = current;
30246- mm = tsk->mm;
30247-
30248 /* Get the faulting address: */
30249- address = read_cr2();
30250+ unsigned long address = read_cr2();
30251+
30252+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30253+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
30254+ if (!search_exception_tables(regs->ip)) {
30255+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30256+ bad_area_nosemaphore(regs, error_code, address);
30257+ return;
30258+ }
30259+ if (address < pax_user_shadow_base) {
30260+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30261+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
30262+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
30263+ } else
30264+ address -= pax_user_shadow_base;
30265+ }
30266+#endif
30267+
30268+ tsk = current;
30269+ mm = tsk->mm;
30270
30271 /*
30272 * Detect and handle instructions that would cause a page fault for
30273@@ -1080,7 +1303,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30274 * User-mode registers count as a user access even for any
30275 * potential system fault or CPU buglet:
30276 */
30277- if (user_mode_vm(regs)) {
30278+ if (user_mode(regs)) {
30279 local_irq_enable();
30280 error_code |= PF_USER;
30281 } else {
30282@@ -1142,6 +1365,11 @@ retry:
30283 might_sleep();
30284 }
30285
30286+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30287+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
30288+ return;
30289+#endif
30290+
30291 vma = find_vma(mm, address);
30292 if (unlikely(!vma)) {
30293 bad_area(regs, error_code, address);
30294@@ -1153,18 +1381,24 @@ retry:
30295 bad_area(regs, error_code, address);
30296 return;
30297 }
30298- if (error_code & PF_USER) {
30299- /*
30300- * Accessing the stack below %sp is always a bug.
30301- * The large cushion allows instructions like enter
30302- * and pusha to work. ("enter $65535, $31" pushes
30303- * 32 pointers and then decrements %sp by 65535.)
30304- */
30305- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
30306- bad_area(regs, error_code, address);
30307- return;
30308- }
30309+ /*
30310+ * Accessing the stack below %sp is always a bug.
30311+ * The large cushion allows instructions like enter
30312+ * and pusha to work. ("enter $65535, $31" pushes
30313+ * 32 pointers and then decrements %sp by 65535.)
30314+ */
30315+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
30316+ bad_area(regs, error_code, address);
30317+ return;
30318 }
30319+
30320+#ifdef CONFIG_PAX_SEGMEXEC
30321+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
30322+ bad_area(regs, error_code, address);
30323+ return;
30324+ }
30325+#endif
30326+
30327 if (unlikely(expand_stack(vma, address))) {
30328 bad_area(regs, error_code, address);
30329 return;
30330@@ -1230,3 +1464,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
30331 __do_page_fault(regs, error_code);
30332 exception_exit(prev_state);
30333 }
30334+
30335+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30336+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
30337+{
30338+ struct mm_struct *mm = current->mm;
30339+ unsigned long ip = regs->ip;
30340+
30341+ if (v8086_mode(regs))
30342+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
30343+
30344+#ifdef CONFIG_PAX_PAGEEXEC
30345+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
30346+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
30347+ return true;
30348+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
30349+ return true;
30350+ return false;
30351+ }
30352+#endif
30353+
30354+#ifdef CONFIG_PAX_SEGMEXEC
30355+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
30356+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
30357+ return true;
30358+ return false;
30359+ }
30360+#endif
30361+
30362+ return false;
30363+}
30364+#endif
30365+
30366+#ifdef CONFIG_PAX_EMUTRAMP
30367+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
30368+{
30369+ int err;
30370+
30371+ do { /* PaX: libffi trampoline emulation */
30372+ unsigned char mov, jmp;
30373+ unsigned int addr1, addr2;
30374+
30375+#ifdef CONFIG_X86_64
30376+ if ((regs->ip + 9) >> 32)
30377+ break;
30378+#endif
30379+
30380+ err = get_user(mov, (unsigned char __user *)regs->ip);
30381+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30382+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
30383+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30384+
30385+ if (err)
30386+ break;
30387+
30388+ if (mov == 0xB8 && jmp == 0xE9) {
30389+ regs->ax = addr1;
30390+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
30391+ return 2;
30392+ }
30393+ } while (0);
30394+
30395+ do { /* PaX: gcc trampoline emulation #1 */
30396+ unsigned char mov1, mov2;
30397+ unsigned short jmp;
30398+ unsigned int addr1, addr2;
30399+
30400+#ifdef CONFIG_X86_64
30401+ if ((regs->ip + 11) >> 32)
30402+ break;
30403+#endif
30404+
30405+ err = get_user(mov1, (unsigned char __user *)regs->ip);
30406+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30407+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
30408+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30409+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
30410+
30411+ if (err)
30412+ break;
30413+
30414+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
30415+ regs->cx = addr1;
30416+ regs->ax = addr2;
30417+ regs->ip = addr2;
30418+ return 2;
30419+ }
30420+ } while (0);
30421+
30422+ do { /* PaX: gcc trampoline emulation #2 */
30423+ unsigned char mov, jmp;
30424+ unsigned int addr1, addr2;
30425+
30426+#ifdef CONFIG_X86_64
30427+ if ((regs->ip + 9) >> 32)
30428+ break;
30429+#endif
30430+
30431+ err = get_user(mov, (unsigned char __user *)regs->ip);
30432+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30433+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
30434+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30435+
30436+ if (err)
30437+ break;
30438+
30439+ if (mov == 0xB9 && jmp == 0xE9) {
30440+ regs->cx = addr1;
30441+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
30442+ return 2;
30443+ }
30444+ } while (0);
30445+
30446+ return 1; /* PaX in action */
30447+}
30448+
30449+#ifdef CONFIG_X86_64
30450+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
30451+{
30452+ int err;
30453+
30454+ do { /* PaX: libffi trampoline emulation */
30455+ unsigned short mov1, mov2, jmp1;
30456+ unsigned char stcclc, jmp2;
30457+ unsigned long addr1, addr2;
30458+
30459+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30460+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
30461+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
30462+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
30463+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
30464+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
30465+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
30466+
30467+ if (err)
30468+ break;
30469+
30470+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30471+ regs->r11 = addr1;
30472+ regs->r10 = addr2;
30473+ if (stcclc == 0xF8)
30474+ regs->flags &= ~X86_EFLAGS_CF;
30475+ else
30476+ regs->flags |= X86_EFLAGS_CF;
30477+ regs->ip = addr1;
30478+ return 2;
30479+ }
30480+ } while (0);
30481+
30482+ do { /* PaX: gcc trampoline emulation #1 */
30483+ unsigned short mov1, mov2, jmp1;
30484+ unsigned char jmp2;
30485+ unsigned int addr1;
30486+ unsigned long addr2;
30487+
30488+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30489+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
30490+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
30491+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
30492+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
30493+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
30494+
30495+ if (err)
30496+ break;
30497+
30498+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30499+ regs->r11 = addr1;
30500+ regs->r10 = addr2;
30501+ regs->ip = addr1;
30502+ return 2;
30503+ }
30504+ } while (0);
30505+
30506+ do { /* PaX: gcc trampoline emulation #2 */
30507+ unsigned short mov1, mov2, jmp1;
30508+ unsigned char jmp2;
30509+ unsigned long addr1, addr2;
30510+
30511+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30512+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
30513+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
30514+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
30515+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
30516+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
30517+
30518+ if (err)
30519+ break;
30520+
30521+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30522+ regs->r11 = addr1;
30523+ regs->r10 = addr2;
30524+ regs->ip = addr1;
30525+ return 2;
30526+ }
30527+ } while (0);
30528+
30529+ return 1; /* PaX in action */
30530+}
30531+#endif
30532+
30533+/*
30534+ * PaX: decide what to do with offenders (regs->ip = fault address)
30535+ *
30536+ * returns 1 when task should be killed
30537+ * 2 when gcc trampoline was detected
30538+ */
30539+static int pax_handle_fetch_fault(struct pt_regs *regs)
30540+{
30541+ if (v8086_mode(regs))
30542+ return 1;
30543+
30544+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
30545+ return 1;
30546+
30547+#ifdef CONFIG_X86_32
30548+ return pax_handle_fetch_fault_32(regs);
30549+#else
30550+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
30551+ return pax_handle_fetch_fault_32(regs);
30552+ else
30553+ return pax_handle_fetch_fault_64(regs);
30554+#endif
30555+}
30556+#endif
30557+
30558+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30559+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
30560+{
30561+ long i;
30562+
30563+ printk(KERN_ERR "PAX: bytes at PC: ");
30564+ for (i = 0; i < 20; i++) {
30565+ unsigned char c;
30566+ if (get_user(c, (unsigned char __force_user *)pc+i))
30567+ printk(KERN_CONT "?? ");
30568+ else
30569+ printk(KERN_CONT "%02x ", c);
30570+ }
30571+ printk("\n");
30572+
30573+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
30574+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
30575+ unsigned long c;
30576+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
30577+#ifdef CONFIG_X86_32
30578+ printk(KERN_CONT "???????? ");
30579+#else
30580+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
30581+ printk(KERN_CONT "???????? ???????? ");
30582+ else
30583+ printk(KERN_CONT "???????????????? ");
30584+#endif
30585+ } else {
30586+#ifdef CONFIG_X86_64
30587+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
30588+ printk(KERN_CONT "%08x ", (unsigned int)c);
30589+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
30590+ } else
30591+#endif
30592+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
30593+ }
30594+ }
30595+ printk("\n");
30596+}
30597+#endif
30598+
30599+/**
30600+ * probe_kernel_write(): safely attempt to write to a location
30601+ * @dst: address to write to
30602+ * @src: pointer to the data that shall be written
30603+ * @size: size of the data chunk
30604+ *
30605+ * Safely write to address @dst from the buffer at @src. If a kernel fault
30606+ * happens, handle that and return -EFAULT.
30607+ */
30608+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
30609+{
30610+ long ret;
30611+ mm_segment_t old_fs = get_fs();
30612+
30613+ set_fs(KERNEL_DS);
30614+ pagefault_disable();
30615+ pax_open_kernel();
30616+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
30617+ pax_close_kernel();
30618+ pagefault_enable();
30619+ set_fs(old_fs);
30620+
30621+ return ret ? -EFAULT : 0;
30622+}
30623diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
30624index dd74e46..7d26398 100644
30625--- a/arch/x86/mm/gup.c
30626+++ b/arch/x86/mm/gup.c
30627@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
30628 addr = start;
30629 len = (unsigned long) nr_pages << PAGE_SHIFT;
30630 end = start + len;
30631- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
30632+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
30633 (void __user *)start, len)))
30634 return 0;
30635
30636diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
30637index 252b8f5..4dcfdc1 100644
30638--- a/arch/x86/mm/highmem_32.c
30639+++ b/arch/x86/mm/highmem_32.c
30640@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
30641 idx = type + KM_TYPE_NR*smp_processor_id();
30642 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
30643 BUG_ON(!pte_none(*(kmap_pte-idx)));
30644+
30645+ pax_open_kernel();
30646 set_pte(kmap_pte-idx, mk_pte(page, prot));
30647+ pax_close_kernel();
30648+
30649 arch_flush_lazy_mmu_mode();
30650
30651 return (void *)vaddr;
30652diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
30653index ae1aa71..d9bea75 100644
30654--- a/arch/x86/mm/hugetlbpage.c
30655+++ b/arch/x86/mm/hugetlbpage.c
30656@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
30657 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
30658 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
30659 unsigned long addr, unsigned long len,
30660- unsigned long pgoff, unsigned long flags)
30661+ unsigned long pgoff, unsigned long flags, unsigned long offset)
30662 {
30663 struct hstate *h = hstate_file(file);
30664 struct vm_unmapped_area_info info;
30665-
30666+
30667 info.flags = 0;
30668 info.length = len;
30669 info.low_limit = TASK_UNMAPPED_BASE;
30670+
30671+#ifdef CONFIG_PAX_RANDMMAP
30672+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
30673+ info.low_limit += current->mm->delta_mmap;
30674+#endif
30675+
30676 info.high_limit = TASK_SIZE;
30677 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
30678 info.align_offset = 0;
30679+ info.threadstack_offset = offset;
30680 return vm_unmapped_area(&info);
30681 }
30682
30683 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30684 unsigned long addr0, unsigned long len,
30685- unsigned long pgoff, unsigned long flags)
30686+ unsigned long pgoff, unsigned long flags, unsigned long offset)
30687 {
30688 struct hstate *h = hstate_file(file);
30689 struct vm_unmapped_area_info info;
30690@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30691 info.high_limit = current->mm->mmap_base;
30692 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
30693 info.align_offset = 0;
30694+ info.threadstack_offset = offset;
30695 addr = vm_unmapped_area(&info);
30696
30697 /*
30698@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30699 VM_BUG_ON(addr != -ENOMEM);
30700 info.flags = 0;
30701 info.low_limit = TASK_UNMAPPED_BASE;
30702+
30703+#ifdef CONFIG_PAX_RANDMMAP
30704+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
30705+ info.low_limit += current->mm->delta_mmap;
30706+#endif
30707+
30708 info.high_limit = TASK_SIZE;
30709 addr = vm_unmapped_area(&info);
30710 }
30711@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
30712 struct hstate *h = hstate_file(file);
30713 struct mm_struct *mm = current->mm;
30714 struct vm_area_struct *vma;
30715+ unsigned long pax_task_size = TASK_SIZE;
30716+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
30717
30718 if (len & ~huge_page_mask(h))
30719 return -EINVAL;
30720- if (len > TASK_SIZE)
30721+
30722+#ifdef CONFIG_PAX_SEGMEXEC
30723+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
30724+ pax_task_size = SEGMEXEC_TASK_SIZE;
30725+#endif
30726+
30727+ pax_task_size -= PAGE_SIZE;
30728+
30729+ if (len > pax_task_size)
30730 return -ENOMEM;
30731
30732 if (flags & MAP_FIXED) {
30733@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
30734 return addr;
30735 }
30736
30737+#ifdef CONFIG_PAX_RANDMMAP
30738+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
30739+#endif
30740+
30741 if (addr) {
30742 addr = ALIGN(addr, huge_page_size(h));
30743 vma = find_vma(mm, addr);
30744- if (TASK_SIZE - len >= addr &&
30745- (!vma || addr + len <= vma->vm_start))
30746+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
30747 return addr;
30748 }
30749 if (mm->get_unmapped_area == arch_get_unmapped_area)
30750 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
30751- pgoff, flags);
30752+ pgoff, flags, offset);
30753 else
30754 return hugetlb_get_unmapped_area_topdown(file, addr, len,
30755- pgoff, flags);
30756+ pgoff, flags, offset);
30757 }
30758
30759 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
30760diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
30761index 1f34e92..c97b98f 100644
30762--- a/arch/x86/mm/init.c
30763+++ b/arch/x86/mm/init.c
30764@@ -4,6 +4,7 @@
30765 #include <linux/swap.h>
30766 #include <linux/memblock.h>
30767 #include <linux/bootmem.h> /* for max_low_pfn */
30768+#include <linux/tboot.h>
30769
30770 #include <asm/cacheflush.h>
30771 #include <asm/e820.h>
30772@@ -17,6 +18,8 @@
30773 #include <asm/proto.h>
30774 #include <asm/dma.h> /* for MAX_DMA_PFN */
30775 #include <asm/microcode.h>
30776+#include <asm/desc.h>
30777+#include <asm/bios_ebda.h>
30778
30779 #include "mm_internal.h"
30780
30781@@ -465,7 +468,18 @@ void __init init_mem_mapping(void)
30782 early_ioremap_page_table_range_init();
30783 #endif
30784
30785+#ifdef CONFIG_PAX_PER_CPU_PGD
30786+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
30787+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
30788+ KERNEL_PGD_PTRS);
30789+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
30790+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
30791+ KERNEL_PGD_PTRS);
30792+ load_cr3(get_cpu_pgd(0, kernel));
30793+#else
30794 load_cr3(swapper_pg_dir);
30795+#endif
30796+
30797 __flush_tlb_all();
30798
30799 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
30800@@ -481,10 +495,40 @@ void __init init_mem_mapping(void)
30801 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
30802 * mmio resources as well as potential bios/acpi data regions.
30803 */
30804+
30805+#ifdef CONFIG_GRKERNSEC_KMEM
30806+static unsigned int ebda_start __read_only;
30807+static unsigned int ebda_end __read_only;
30808+#endif
30809+
30810 int devmem_is_allowed(unsigned long pagenr)
30811 {
30812- if (pagenr < 256)
30813+#ifdef CONFIG_GRKERNSEC_KMEM
30814+ /* allow BDA */
30815+ if (!pagenr)
30816 return 1;
30817+ /* allow EBDA */
30818+ if (pagenr >= ebda_start && pagenr < ebda_end)
30819+ return 1;
30820+ /* if tboot is in use, allow access to its hardcoded serial log range */
30821+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
30822+ return 1;
30823+#else
30824+ if (!pagenr)
30825+ return 1;
30826+#ifdef CONFIG_VM86
30827+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
30828+ return 1;
30829+#endif
30830+#endif
30831+
30832+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
30833+ return 1;
30834+#ifdef CONFIG_GRKERNSEC_KMEM
30835+ /* throw out everything else below 1MB */
30836+ if (pagenr <= 256)
30837+ return 0;
30838+#endif
30839 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
30840 return 0;
30841 if (!page_is_ram(pagenr))
30842@@ -538,8 +582,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
30843 #endif
30844 }
30845
30846+#ifdef CONFIG_GRKERNSEC_KMEM
30847+static inline void gr_init_ebda(void)
30848+{
30849+ unsigned int ebda_addr;
30850+ unsigned int ebda_size = 0;
30851+
30852+ ebda_addr = get_bios_ebda();
30853+ if (ebda_addr) {
30854+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
30855+ ebda_size <<= 10;
30856+ }
30857+ if (ebda_addr && ebda_size) {
30858+ ebda_start = ebda_addr >> PAGE_SHIFT;
30859+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
30860+ } else {
30861+ ebda_start = 0x9f000 >> PAGE_SHIFT;
30862+ ebda_end = 0xa0000 >> PAGE_SHIFT;
30863+ }
30864+}
30865+#else
30866+static inline void gr_init_ebda(void) { }
30867+#endif
30868+
30869 void free_initmem(void)
30870 {
30871+#ifdef CONFIG_PAX_KERNEXEC
30872+#ifdef CONFIG_X86_32
30873+ /* PaX: limit KERNEL_CS to actual size */
30874+ unsigned long addr, limit;
30875+ struct desc_struct d;
30876+ int cpu;
30877+#else
30878+ pgd_t *pgd;
30879+ pud_t *pud;
30880+ pmd_t *pmd;
30881+ unsigned long addr, end;
30882+#endif
30883+#endif
30884+
30885+ gr_init_ebda();
30886+
30887+#ifdef CONFIG_PAX_KERNEXEC
30888+#ifdef CONFIG_X86_32
30889+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
30890+ limit = (limit - 1UL) >> PAGE_SHIFT;
30891+
30892+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
30893+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30894+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
30895+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
30896+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
30897+ }
30898+
30899+ /* PaX: make KERNEL_CS read-only */
30900+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
30901+ if (!paravirt_enabled())
30902+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
30903+/*
30904+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
30905+ pgd = pgd_offset_k(addr);
30906+ pud = pud_offset(pgd, addr);
30907+ pmd = pmd_offset(pud, addr);
30908+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30909+ }
30910+*/
30911+#ifdef CONFIG_X86_PAE
30912+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
30913+/*
30914+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
30915+ pgd = pgd_offset_k(addr);
30916+ pud = pud_offset(pgd, addr);
30917+ pmd = pmd_offset(pud, addr);
30918+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30919+ }
30920+*/
30921+#endif
30922+
30923+#ifdef CONFIG_MODULES
30924+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
30925+#endif
30926+
30927+#else
30928+ /* PaX: make kernel code/rodata read-only, rest non-executable */
30929+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
30930+ pgd = pgd_offset_k(addr);
30931+ pud = pud_offset(pgd, addr);
30932+ pmd = pmd_offset(pud, addr);
30933+ if (!pmd_present(*pmd))
30934+ continue;
30935+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
30936+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30937+ else
30938+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30939+ }
30940+
30941+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
30942+ end = addr + KERNEL_IMAGE_SIZE;
30943+ for (; addr < end; addr += PMD_SIZE) {
30944+ pgd = pgd_offset_k(addr);
30945+ pud = pud_offset(pgd, addr);
30946+ pmd = pmd_offset(pud, addr);
30947+ if (!pmd_present(*pmd))
30948+ continue;
30949+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
30950+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30951+ }
30952+#endif
30953+
30954+ flush_tlb_all();
30955+#endif
30956+
30957 free_init_pages("unused kernel memory",
30958 (unsigned long)(&__init_begin),
30959 (unsigned long)(&__init_end));
30960diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
30961index 3ac7e31..89611b7 100644
30962--- a/arch/x86/mm/init_32.c
30963+++ b/arch/x86/mm/init_32.c
30964@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
30965 bool __read_mostly __vmalloc_start_set = false;
30966
30967 /*
30968- * Creates a middle page table and puts a pointer to it in the
30969- * given global directory entry. This only returns the gd entry
30970- * in non-PAE compilation mode, since the middle layer is folded.
30971- */
30972-static pmd_t * __init one_md_table_init(pgd_t *pgd)
30973-{
30974- pud_t *pud;
30975- pmd_t *pmd_table;
30976-
30977-#ifdef CONFIG_X86_PAE
30978- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
30979- pmd_table = (pmd_t *)alloc_low_page();
30980- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
30981- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
30982- pud = pud_offset(pgd, 0);
30983- BUG_ON(pmd_table != pmd_offset(pud, 0));
30984-
30985- return pmd_table;
30986- }
30987-#endif
30988- pud = pud_offset(pgd, 0);
30989- pmd_table = pmd_offset(pud, 0);
30990-
30991- return pmd_table;
30992-}
30993-
30994-/*
30995 * Create a page table and place a pointer to it in a middle page
30996 * directory entry:
30997 */
30998@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
30999 pte_t *page_table = (pte_t *)alloc_low_page();
31000
31001 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
31002+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31003+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
31004+#else
31005 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
31006+#endif
31007 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
31008 }
31009
31010 return pte_offset_kernel(pmd, 0);
31011 }
31012
31013+static pmd_t * __init one_md_table_init(pgd_t *pgd)
31014+{
31015+ pud_t *pud;
31016+ pmd_t *pmd_table;
31017+
31018+ pud = pud_offset(pgd, 0);
31019+ pmd_table = pmd_offset(pud, 0);
31020+
31021+ return pmd_table;
31022+}
31023+
31024 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
31025 {
31026 int pgd_idx = pgd_index(vaddr);
31027@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31028 int pgd_idx, pmd_idx;
31029 unsigned long vaddr;
31030 pgd_t *pgd;
31031+ pud_t *pud;
31032 pmd_t *pmd;
31033 pte_t *pte = NULL;
31034 unsigned long count = page_table_range_init_count(start, end);
31035@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31036 pgd = pgd_base + pgd_idx;
31037
31038 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
31039- pmd = one_md_table_init(pgd);
31040- pmd = pmd + pmd_index(vaddr);
31041+ pud = pud_offset(pgd, vaddr);
31042+ pmd = pmd_offset(pud, vaddr);
31043+
31044+#ifdef CONFIG_X86_PAE
31045+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31046+#endif
31047+
31048 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
31049 pmd++, pmd_idx++) {
31050 pte = page_table_kmap_check(one_page_table_init(pmd),
31051@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31052 }
31053 }
31054
31055-static inline int is_kernel_text(unsigned long addr)
31056+static inline int is_kernel_text(unsigned long start, unsigned long end)
31057 {
31058- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
31059- return 1;
31060- return 0;
31061+ if ((start > ktla_ktva((unsigned long)_etext) ||
31062+ end <= ktla_ktva((unsigned long)_stext)) &&
31063+ (start > ktla_ktva((unsigned long)_einittext) ||
31064+ end <= ktla_ktva((unsigned long)_sinittext)) &&
31065+
31066+#ifdef CONFIG_ACPI_SLEEP
31067+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
31068+#endif
31069+
31070+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
31071+ return 0;
31072+ return 1;
31073 }
31074
31075 /*
31076@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
31077 unsigned long last_map_addr = end;
31078 unsigned long start_pfn, end_pfn;
31079 pgd_t *pgd_base = swapper_pg_dir;
31080- int pgd_idx, pmd_idx, pte_ofs;
31081+ unsigned int pgd_idx, pmd_idx, pte_ofs;
31082 unsigned long pfn;
31083 pgd_t *pgd;
31084+ pud_t *pud;
31085 pmd_t *pmd;
31086 pte_t *pte;
31087 unsigned pages_2m, pages_4k;
31088@@ -291,8 +295,13 @@ repeat:
31089 pfn = start_pfn;
31090 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31091 pgd = pgd_base + pgd_idx;
31092- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
31093- pmd = one_md_table_init(pgd);
31094+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
31095+ pud = pud_offset(pgd, 0);
31096+ pmd = pmd_offset(pud, 0);
31097+
31098+#ifdef CONFIG_X86_PAE
31099+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31100+#endif
31101
31102 if (pfn >= end_pfn)
31103 continue;
31104@@ -304,14 +313,13 @@ repeat:
31105 #endif
31106 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
31107 pmd++, pmd_idx++) {
31108- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
31109+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
31110
31111 /*
31112 * Map with big pages if possible, otherwise
31113 * create normal page tables:
31114 */
31115 if (use_pse) {
31116- unsigned int addr2;
31117 pgprot_t prot = PAGE_KERNEL_LARGE;
31118 /*
31119 * first pass will use the same initial
31120@@ -322,11 +330,7 @@ repeat:
31121 _PAGE_PSE);
31122
31123 pfn &= PMD_MASK >> PAGE_SHIFT;
31124- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
31125- PAGE_OFFSET + PAGE_SIZE-1;
31126-
31127- if (is_kernel_text(addr) ||
31128- is_kernel_text(addr2))
31129+ if (is_kernel_text(address, address + PMD_SIZE))
31130 prot = PAGE_KERNEL_LARGE_EXEC;
31131
31132 pages_2m++;
31133@@ -343,7 +347,7 @@ repeat:
31134 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31135 pte += pte_ofs;
31136 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
31137- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
31138+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
31139 pgprot_t prot = PAGE_KERNEL;
31140 /*
31141 * first pass will use the same initial
31142@@ -351,7 +355,7 @@ repeat:
31143 */
31144 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
31145
31146- if (is_kernel_text(addr))
31147+ if (is_kernel_text(address, address + PAGE_SIZE))
31148 prot = PAGE_KERNEL_EXEC;
31149
31150 pages_4k++;
31151@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
31152
31153 pud = pud_offset(pgd, va);
31154 pmd = pmd_offset(pud, va);
31155- if (!pmd_present(*pmd))
31156+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
31157 break;
31158
31159 /* should not be large page here */
31160@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
31161
31162 static void __init pagetable_init(void)
31163 {
31164- pgd_t *pgd_base = swapper_pg_dir;
31165-
31166- permanent_kmaps_init(pgd_base);
31167+ permanent_kmaps_init(swapper_pg_dir);
31168 }
31169
31170-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31171+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31172 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31173
31174 /* user-defined highmem size */
31175@@ -772,7 +774,7 @@ void __init mem_init(void)
31176 after_bootmem = 1;
31177
31178 codesize = (unsigned long) &_etext - (unsigned long) &_text;
31179- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
31180+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
31181 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
31182
31183 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
31184@@ -813,10 +815,10 @@ void __init mem_init(void)
31185 ((unsigned long)&__init_end -
31186 (unsigned long)&__init_begin) >> 10,
31187
31188- (unsigned long)&_etext, (unsigned long)&_edata,
31189- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
31190+ (unsigned long)&_sdata, (unsigned long)&_edata,
31191+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
31192
31193- (unsigned long)&_text, (unsigned long)&_etext,
31194+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
31195 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
31196
31197 /*
31198@@ -906,6 +908,7 @@ void set_kernel_text_rw(void)
31199 if (!kernel_set_to_readonly)
31200 return;
31201
31202+ start = ktla_ktva(start);
31203 pr_debug("Set kernel text: %lx - %lx for read write\n",
31204 start, start+size);
31205
31206@@ -920,6 +923,7 @@ void set_kernel_text_ro(void)
31207 if (!kernel_set_to_readonly)
31208 return;
31209
31210+ start = ktla_ktva(start);
31211 pr_debug("Set kernel text: %lx - %lx for read only\n",
31212 start, start+size);
31213
31214@@ -948,6 +952,7 @@ void mark_rodata_ro(void)
31215 unsigned long start = PFN_ALIGN(_text);
31216 unsigned long size = PFN_ALIGN(_etext) - start;
31217
31218+ start = ktla_ktva(start);
31219 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
31220 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
31221 size >> 10);
31222diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
31223index bb00c46..bf91a67 100644
31224--- a/arch/x86/mm/init_64.c
31225+++ b/arch/x86/mm/init_64.c
31226@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
31227 * around without checking the pgd every time.
31228 */
31229
31230-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
31231+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
31232 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31233
31234 int force_personality32;
31235@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31236
31237 for (address = start; address <= end; address += PGDIR_SIZE) {
31238 const pgd_t *pgd_ref = pgd_offset_k(address);
31239+
31240+#ifdef CONFIG_PAX_PER_CPU_PGD
31241+ unsigned long cpu;
31242+#else
31243 struct page *page;
31244+#endif
31245
31246 if (pgd_none(*pgd_ref))
31247 continue;
31248
31249 spin_lock(&pgd_lock);
31250+
31251+#ifdef CONFIG_PAX_PER_CPU_PGD
31252+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31253+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
31254+
31255+ if (pgd_none(*pgd))
31256+ set_pgd(pgd, *pgd_ref);
31257+ else
31258+ BUG_ON(pgd_page_vaddr(*pgd)
31259+ != pgd_page_vaddr(*pgd_ref));
31260+ pgd = pgd_offset_cpu(cpu, kernel, address);
31261+#else
31262 list_for_each_entry(page, &pgd_list, lru) {
31263 pgd_t *pgd;
31264 spinlock_t *pgt_lock;
31265@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31266 /* the pgt_lock only for Xen */
31267 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31268 spin_lock(pgt_lock);
31269+#endif
31270
31271 if (pgd_none(*pgd))
31272 set_pgd(pgd, *pgd_ref);
31273@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31274 BUG_ON(pgd_page_vaddr(*pgd)
31275 != pgd_page_vaddr(*pgd_ref));
31276
31277+#ifndef CONFIG_PAX_PER_CPU_PGD
31278 spin_unlock(pgt_lock);
31279+#endif
31280+
31281 }
31282 spin_unlock(&pgd_lock);
31283 }
31284@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
31285 {
31286 if (pgd_none(*pgd)) {
31287 pud_t *pud = (pud_t *)spp_getpage();
31288- pgd_populate(&init_mm, pgd, pud);
31289+ pgd_populate_kernel(&init_mm, pgd, pud);
31290 if (pud != pud_offset(pgd, 0))
31291 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
31292 pud, pud_offset(pgd, 0));
31293@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
31294 {
31295 if (pud_none(*pud)) {
31296 pmd_t *pmd = (pmd_t *) spp_getpage();
31297- pud_populate(&init_mm, pud, pmd);
31298+ pud_populate_kernel(&init_mm, pud, pmd);
31299 if (pmd != pmd_offset(pud, 0))
31300 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
31301 pmd, pmd_offset(pud, 0));
31302@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
31303 pmd = fill_pmd(pud, vaddr);
31304 pte = fill_pte(pmd, vaddr);
31305
31306+ pax_open_kernel();
31307 set_pte(pte, new_pte);
31308+ pax_close_kernel();
31309
31310 /*
31311 * It's enough to flush this one mapping.
31312@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
31313 pgd = pgd_offset_k((unsigned long)__va(phys));
31314 if (pgd_none(*pgd)) {
31315 pud = (pud_t *) spp_getpage();
31316- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
31317- _PAGE_USER));
31318+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
31319 }
31320 pud = pud_offset(pgd, (unsigned long)__va(phys));
31321 if (pud_none(*pud)) {
31322 pmd = (pmd_t *) spp_getpage();
31323- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
31324- _PAGE_USER));
31325+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
31326 }
31327 pmd = pmd_offset(pud, phys);
31328 BUG_ON(!pmd_none(*pmd));
31329@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
31330 prot);
31331
31332 spin_lock(&init_mm.page_table_lock);
31333- pud_populate(&init_mm, pud, pmd);
31334+ pud_populate_kernel(&init_mm, pud, pmd);
31335 spin_unlock(&init_mm.page_table_lock);
31336 }
31337 __flush_tlb_all();
31338@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
31339 page_size_mask);
31340
31341 spin_lock(&init_mm.page_table_lock);
31342- pgd_populate(&init_mm, pgd, pud);
31343+ pgd_populate_kernel(&init_mm, pgd, pud);
31344 spin_unlock(&init_mm.page_table_lock);
31345 pgd_changed = true;
31346 }
31347@@ -1221,8 +1242,8 @@ int kern_addr_valid(unsigned long addr)
31348 static struct vm_area_struct gate_vma = {
31349 .vm_start = VSYSCALL_START,
31350 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
31351- .vm_page_prot = PAGE_READONLY_EXEC,
31352- .vm_flags = VM_READ | VM_EXEC
31353+ .vm_page_prot = PAGE_READONLY,
31354+ .vm_flags = VM_READ
31355 };
31356
31357 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31358@@ -1256,7 +1277,7 @@ int in_gate_area_no_mm(unsigned long addr)
31359
31360 const char *arch_vma_name(struct vm_area_struct *vma)
31361 {
31362- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31363+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31364 return "[vdso]";
31365 if (vma == &gate_vma)
31366 return "[vsyscall]";
31367diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
31368index 7b179b4..6bd17777 100644
31369--- a/arch/x86/mm/iomap_32.c
31370+++ b/arch/x86/mm/iomap_32.c
31371@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
31372 type = kmap_atomic_idx_push();
31373 idx = type + KM_TYPE_NR * smp_processor_id();
31374 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31375+
31376+ pax_open_kernel();
31377 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
31378+ pax_close_kernel();
31379+
31380 arch_flush_lazy_mmu_mode();
31381
31382 return (void *)vaddr;
31383diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
31384index 9a1e658..da003f3 100644
31385--- a/arch/x86/mm/ioremap.c
31386+++ b/arch/x86/mm/ioremap.c
31387@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
31388 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
31389 int is_ram = page_is_ram(pfn);
31390
31391- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
31392+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
31393 return NULL;
31394 WARN_ON_ONCE(is_ram);
31395 }
31396@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
31397 *
31398 * Caller must ensure there is only one unmapping for the same pointer.
31399 */
31400-void iounmap(volatile void __iomem *addr)
31401+void iounmap(const volatile void __iomem *addr)
31402 {
31403 struct vm_struct *p, *o;
31404
31405@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
31406
31407 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
31408 if (page_is_ram(start >> PAGE_SHIFT))
31409+#ifdef CONFIG_HIGHMEM
31410+ if ((start >> PAGE_SHIFT) < max_low_pfn)
31411+#endif
31412 return __va(phys);
31413
31414 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
31415@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
31416 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
31417 {
31418 if (page_is_ram(phys >> PAGE_SHIFT))
31419+#ifdef CONFIG_HIGHMEM
31420+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
31421+#endif
31422 return;
31423
31424 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
31425@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
31426 early_param("early_ioremap_debug", early_ioremap_debug_setup);
31427
31428 static __initdata int after_paging_init;
31429-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
31430+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
31431
31432 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
31433 {
31434@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
31435 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
31436
31437 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
31438- memset(bm_pte, 0, sizeof(bm_pte));
31439- pmd_populate_kernel(&init_mm, pmd, bm_pte);
31440+ pmd_populate_user(&init_mm, pmd, bm_pte);
31441
31442 /*
31443 * The boot-ioremap range spans multiple pmds, for which
31444diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
31445index d87dd6d..bf3fa66 100644
31446--- a/arch/x86/mm/kmemcheck/kmemcheck.c
31447+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
31448@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
31449 * memory (e.g. tracked pages)? For now, we need this to avoid
31450 * invoking kmemcheck for PnP BIOS calls.
31451 */
31452- if (regs->flags & X86_VM_MASK)
31453+ if (v8086_mode(regs))
31454 return false;
31455- if (regs->cs != __KERNEL_CS)
31456+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
31457 return false;
31458
31459 pte = kmemcheck_pte_lookup(address);
31460diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
e2b79cd1 31461index 5c1ae28..45f4ac9 100644
bb5f0bf8
AF
31462--- a/arch/x86/mm/mmap.c
31463+++ b/arch/x86/mm/mmap.c
31464@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
31465 * Leave an at least ~128 MB hole with possible stack randomization.
31466 */
31467 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
31468-#define MAX_GAP (TASK_SIZE/6*5)
31469+#define MAX_GAP (pax_task_size/6*5)
31470
31471 static int mmap_is_legacy(void)
31472 {
31473@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
31474 return rnd << PAGE_SHIFT;
31475 }
31476
31477-static unsigned long mmap_base(void)
31478+static unsigned long mmap_base(struct mm_struct *mm)
31479 {
31480 unsigned long gap = rlimit(RLIMIT_STACK);
31481+ unsigned long pax_task_size = TASK_SIZE;
31482+
31483+#ifdef CONFIG_PAX_SEGMEXEC
31484+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31485+ pax_task_size = SEGMEXEC_TASK_SIZE;
31486+#endif
31487
31488 if (gap < MIN_GAP)
31489 gap = MIN_GAP;
31490 else if (gap > MAX_GAP)
31491 gap = MAX_GAP;
31492
31493- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
31494+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
31495 }
31496
31497 /*
31498 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
31499 * does, but not when emulating X86_32
31500 */
e2b79cd1
AF
31501-static unsigned long mmap_legacy_base(void)
31502+static unsigned long mmap_legacy_base(struct mm_struct *mm)
bb5f0bf8
AF
31503 {
31504- if (mmap_is_ia32())
31505+ if (mmap_is_ia32()) {
31506+
31507+#ifdef CONFIG_PAX_SEGMEXEC
31508+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31509+ return SEGMEXEC_TASK_UNMAPPED_BASE;
31510+ else
31511+#endif
31512+
31513 return TASK_UNMAPPED_BASE;
31514- else
31515+ } else
31516 return TASK_UNMAPPED_BASE + mmap_rnd();
31517 }
31518
e2b79cd1
AF
31519@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
31520 */
bb5f0bf8
AF
31521 void arch_pick_mmap_layout(struct mm_struct *mm)
31522 {
e2b79cd1
AF
31523- mm->mmap_legacy_base = mmap_legacy_base();
31524- mm->mmap_base = mmap_base();
31525+ mm->mmap_legacy_base = mmap_legacy_base(mm);
31526+ mm->mmap_base = mmap_base(mm);
bb5f0bf8
AF
31527+
31528+#ifdef CONFIG_PAX_RANDMMAP
e2b79cd1
AF
31529+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
31530+ mm->mmap_legacy_base += mm->delta_mmap;
31531+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
31532+ }
bb5f0bf8 31533+#endif
e2b79cd1
AF
31534
31535 if (mmap_is_legacy()) {
31536 mm->mmap_base = mm->mmap_legacy_base;
bb5f0bf8
AF
31537diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
31538index dc0b727..f612039 100644
31539--- a/arch/x86/mm/mmio-mod.c
31540+++ b/arch/x86/mm/mmio-mod.c
31541@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
31542 break;
31543 default:
31544 {
31545- unsigned char *ip = (unsigned char *)instptr;
31546+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
31547 my_trace->opcode = MMIO_UNKNOWN_OP;
31548 my_trace->width = 0;
31549 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
31550@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
31551 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
31552 void __iomem *addr)
31553 {
31554- static atomic_t next_id;
31555+ static atomic_unchecked_t next_id;
31556 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
31557 /* These are page-unaligned. */
31558 struct mmiotrace_map map = {
31559@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
31560 .private = trace
31561 },
31562 .phys = offset,
31563- .id = atomic_inc_return(&next_id)
31564+ .id = atomic_inc_return_unchecked(&next_id)
31565 };
31566 map.map_id = trace->id;
31567
31568@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
31569 ioremap_trace_core(offset, size, addr);
31570 }
31571
31572-static void iounmap_trace_core(volatile void __iomem *addr)
31573+static void iounmap_trace_core(const volatile void __iomem *addr)
31574 {
31575 struct mmiotrace_map map = {
31576 .phys = 0,
31577@@ -328,7 +328,7 @@ not_enabled:
31578 }
31579 }
31580
31581-void mmiotrace_iounmap(volatile void __iomem *addr)
31582+void mmiotrace_iounmap(const volatile void __iomem *addr)
31583 {
31584 might_sleep();
31585 if (is_enabled()) /* recheck and proper locking in *_core() */
31586diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
31587index a71c4e2..301ae44 100644
31588--- a/arch/x86/mm/numa.c
31589+++ b/arch/x86/mm/numa.c
31590@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
31591 return true;
31592 }
31593
31594-static int __init numa_register_memblks(struct numa_meminfo *mi)
31595+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
31596 {
31597 unsigned long uninitialized_var(pfn_align);
31598 int i, nid;
31599diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
31600index d0b1773..4c3327c 100644
31601--- a/arch/x86/mm/pageattr-test.c
31602+++ b/arch/x86/mm/pageattr-test.c
31603@@ -36,7 +36,7 @@ enum {
31604
31605 static int pte_testbit(pte_t pte)
31606 {
31607- return pte_flags(pte) & _PAGE_UNUSED1;
31608+ return pte_flags(pte) & _PAGE_CPA_TEST;
31609 }
31610
31611 struct split_state {
31612diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
31613index bb32480..75f2f5e 100644
31614--- a/arch/x86/mm/pageattr.c
31615+++ b/arch/x86/mm/pageattr.c
31616@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31617 */
31618 #ifdef CONFIG_PCI_BIOS
31619 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
31620- pgprot_val(forbidden) |= _PAGE_NX;
31621+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31622 #endif
31623
31624 /*
31625@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31626 * Does not cover __inittext since that is gone later on. On
31627 * 64bit we do not enforce !NX on the low mapping
31628 */
31629- if (within(address, (unsigned long)_text, (unsigned long)_etext))
31630- pgprot_val(forbidden) |= _PAGE_NX;
31631+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
31632+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31633
31634+#ifdef CONFIG_DEBUG_RODATA
31635 /*
31636 * The .rodata section needs to be read-only. Using the pfn
31637 * catches all aliases.
31638@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31639 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
31640 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
31641 pgprot_val(forbidden) |= _PAGE_RW;
31642+#endif
31643
31644 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
31645 /*
31646@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31647 }
31648 #endif
31649
31650+#ifdef CONFIG_PAX_KERNEXEC
31651+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
31652+ pgprot_val(forbidden) |= _PAGE_RW;
31653+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31654+ }
31655+#endif
31656+
31657 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
31658
31659 return prot;
31660@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
31661 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
31662 {
31663 /* change init_mm */
31664+ pax_open_kernel();
31665 set_pte_atomic(kpte, pte);
31666+
31667 #ifdef CONFIG_X86_32
31668 if (!SHARED_KERNEL_PMD) {
31669+
31670+#ifdef CONFIG_PAX_PER_CPU_PGD
31671+ unsigned long cpu;
31672+#else
31673 struct page *page;
31674+#endif
31675
31676+#ifdef CONFIG_PAX_PER_CPU_PGD
31677+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31678+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
31679+#else
31680 list_for_each_entry(page, &pgd_list, lru) {
31681- pgd_t *pgd;
31682+ pgd_t *pgd = (pgd_t *)page_address(page);
31683+#endif
31684+
31685 pud_t *pud;
31686 pmd_t *pmd;
31687
31688- pgd = (pgd_t *)page_address(page) + pgd_index(address);
31689+ pgd += pgd_index(address);
31690 pud = pud_offset(pgd, address);
31691 pmd = pmd_offset(pud, address);
31692 set_pte_atomic((pte_t *)pmd, pte);
31693 }
31694 }
31695 #endif
31696+ pax_close_kernel();
31697 }
31698
31699 static int
31700diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
31701index 6574388..87e9bef 100644
31702--- a/arch/x86/mm/pat.c
31703+++ b/arch/x86/mm/pat.c
31704@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
31705
31706 if (!entry) {
31707 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
31708- current->comm, current->pid, start, end - 1);
31709+ current->comm, task_pid_nr(current), start, end - 1);
31710 return -EINVAL;
31711 }
31712
31713@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31714
31715 while (cursor < to) {
31716 if (!devmem_is_allowed(pfn)) {
31717- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
31718- current->comm, from, to - 1);
31719+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
31720+ current->comm, from, to - 1, cursor);
31721 return 0;
31722 }
31723 cursor += PAGE_SIZE;
31724@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
31725 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
31726 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
31727 "for [mem %#010Lx-%#010Lx]\n",
31728- current->comm, current->pid,
31729+ current->comm, task_pid_nr(current),
31730 cattr_name(flags),
31731 base, (unsigned long long)(base + size-1));
31732 return -EINVAL;
31733@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
31734 flags = lookup_memtype(paddr);
31735 if (want_flags != flags) {
31736 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
31737- current->comm, current->pid,
31738+ current->comm, task_pid_nr(current),
31739 cattr_name(want_flags),
31740 (unsigned long long)paddr,
31741 (unsigned long long)(paddr + size - 1),
31742@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
31743 free_memtype(paddr, paddr + size);
31744 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
31745 " for [mem %#010Lx-%#010Lx], got %s\n",
31746- current->comm, current->pid,
31747+ current->comm, task_pid_nr(current),
31748 cattr_name(want_flags),
31749 (unsigned long long)paddr,
31750 (unsigned long long)(paddr + size - 1),
31751diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
31752index 415f6c4..d319983 100644
31753--- a/arch/x86/mm/pat_rbtree.c
31754+++ b/arch/x86/mm/pat_rbtree.c
31755@@ -160,7 +160,7 @@ success:
31756
31757 failure:
31758 printk(KERN_INFO "%s:%d conflicting memory types "
31759- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
31760+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
31761 end, cattr_name(found_type), cattr_name(match->type));
31762 return -EBUSY;
31763 }
31764diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
31765index 9f0614d..92ae64a 100644
31766--- a/arch/x86/mm/pf_in.c
31767+++ b/arch/x86/mm/pf_in.c
31768@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
31769 int i;
31770 enum reason_type rv = OTHERS;
31771
31772- p = (unsigned char *)ins_addr;
31773+ p = (unsigned char *)ktla_ktva(ins_addr);
31774 p += skip_prefix(p, &prf);
31775 p += get_opcode(p, &opcode);
31776
31777@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
31778 struct prefix_bits prf;
31779 int i;
31780
31781- p = (unsigned char *)ins_addr;
31782+ p = (unsigned char *)ktla_ktva(ins_addr);
31783 p += skip_prefix(p, &prf);
31784 p += get_opcode(p, &opcode);
31785
31786@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
31787 struct prefix_bits prf;
31788 int i;
31789
31790- p = (unsigned char *)ins_addr;
31791+ p = (unsigned char *)ktla_ktva(ins_addr);
31792 p += skip_prefix(p, &prf);
31793 p += get_opcode(p, &opcode);
31794
31795@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
31796 struct prefix_bits prf;
31797 int i;
31798
31799- p = (unsigned char *)ins_addr;
31800+ p = (unsigned char *)ktla_ktva(ins_addr);
31801 p += skip_prefix(p, &prf);
31802 p += get_opcode(p, &opcode);
31803 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
31804@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
31805 struct prefix_bits prf;
31806 int i;
31807
31808- p = (unsigned char *)ins_addr;
31809+ p = (unsigned char *)ktla_ktva(ins_addr);
31810 p += skip_prefix(p, &prf);
31811 p += get_opcode(p, &opcode);
31812 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
31813diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
31814index 17fda6a..f7d54a0 100644
31815--- a/arch/x86/mm/pgtable.c
31816+++ b/arch/x86/mm/pgtable.c
31817@@ -91,10 +91,67 @@ static inline void pgd_list_del(pgd_t *pgd)
31818 list_del(&page->lru);
31819 }
31820
31821-#define UNSHARED_PTRS_PER_PGD \
31822- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31823+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31824+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
31825
31826+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
31827+{
31828+ unsigned int count = USER_PGD_PTRS;
31829
31830+ if (!pax_user_shadow_base)
31831+ return;
31832+
31833+ while (count--)
31834+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
31835+}
31836+#endif
31837+
31838+#ifdef CONFIG_PAX_PER_CPU_PGD
31839+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
31840+{
31841+ unsigned int count = USER_PGD_PTRS;
31842+
31843+ while (count--) {
31844+ pgd_t pgd;
31845+
31846+#ifdef CONFIG_X86_64
31847+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
31848+#else
31849+ pgd = *src++;
31850+#endif
31851+
31852+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31853+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
31854+#endif
31855+
31856+ *dst++ = pgd;
31857+ }
31858+
31859+}
31860+#endif
31861+
31862+#ifdef CONFIG_X86_64
31863+#define pxd_t pud_t
31864+#define pyd_t pgd_t
31865+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
31866+#define pxd_free(mm, pud) pud_free((mm), (pud))
31867+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
31868+#define pyd_offset(mm, address) pgd_offset((mm), (address))
31869+#define PYD_SIZE PGDIR_SIZE
31870+#else
31871+#define pxd_t pmd_t
31872+#define pyd_t pud_t
31873+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
31874+#define pxd_free(mm, pud) pmd_free((mm), (pud))
31875+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
31876+#define pyd_offset(mm, address) pud_offset((mm), (address))
31877+#define PYD_SIZE PUD_SIZE
31878+#endif
31879+
31880+#ifdef CONFIG_PAX_PER_CPU_PGD
31881+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
31882+static inline void pgd_dtor(pgd_t *pgd) {}
31883+#else
31884 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
31885 {
31886 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
31887@@ -135,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
31888 pgd_list_del(pgd);
31889 spin_unlock(&pgd_lock);
31890 }
31891+#endif
31892
31893 /*
31894 * List of all pgd's needed for non-PAE so it can invalidate entries
31895@@ -147,7 +205,7 @@ static void pgd_dtor(pgd_t *pgd)
31896 * -- nyc
31897 */
31898
31899-#ifdef CONFIG_X86_PAE
31900+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
31901 /*
31902 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
31903 * updating the top-level pagetable entries to guarantee the
31904@@ -159,7 +217,7 @@ static void pgd_dtor(pgd_t *pgd)
31905 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
31906 * and initialize the kernel pmds here.
31907 */
31908-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
31909+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31910
31911 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31912 {
31913@@ -177,36 +235,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31914 */
31915 flush_tlb_mm(mm);
31916 }
31917+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
31918+#define PREALLOCATED_PXDS USER_PGD_PTRS
31919 #else /* !CONFIG_X86_PAE */
31920
31921 /* No need to prepopulate any pagetable entries in non-PAE modes. */
31922-#define PREALLOCATED_PMDS 0
31923+#define PREALLOCATED_PXDS 0
31924
31925 #endif /* CONFIG_X86_PAE */
31926
31927-static void free_pmds(pmd_t *pmds[])
31928+static void free_pxds(pxd_t *pxds[])
31929 {
31930 int i;
31931
31932- for(i = 0; i < PREALLOCATED_PMDS; i++)
31933- if (pmds[i])
31934- free_page((unsigned long)pmds[i]);
31935+ for(i = 0; i < PREALLOCATED_PXDS; i++)
31936+ if (pxds[i])
31937+ free_page((unsigned long)pxds[i]);
31938 }
31939
31940-static int preallocate_pmds(pmd_t *pmds[])
31941+static int preallocate_pxds(pxd_t *pxds[])
31942 {
31943 int i;
31944 bool failed = false;
31945
31946- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31947- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
31948- if (pmd == NULL)
31949+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31950+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
31951+ if (pxd == NULL)
31952 failed = true;
31953- pmds[i] = pmd;
31954+ pxds[i] = pxd;
31955 }
31956
31957 if (failed) {
31958- free_pmds(pmds);
31959+ free_pxds(pxds);
31960 return -ENOMEM;
31961 }
31962
31963@@ -219,51 +279,55 @@ static int preallocate_pmds(pmd_t *pmds[])
31964 * preallocate which never got a corresponding vma will need to be
31965 * freed manually.
31966 */
31967-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
31968+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
31969 {
31970 int i;
31971
31972- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31973+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31974 pgd_t pgd = pgdp[i];
31975
31976 if (pgd_val(pgd) != 0) {
31977- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
31978+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
31979
31980- pgdp[i] = native_make_pgd(0);
31981+ set_pgd(pgdp + i, native_make_pgd(0));
31982
31983- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
31984- pmd_free(mm, pmd);
31985+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
31986+ pxd_free(mm, pxd);
31987 }
31988 }
31989 }
31990
31991-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
31992+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
31993 {
31994- pud_t *pud;
31995+ pyd_t *pyd;
31996 unsigned long addr;
31997 int i;
31998
31999- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
32000+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
32001 return;
32002
32003- pud = pud_offset(pgd, 0);
32004+#ifdef CONFIG_X86_64
32005+ pyd = pyd_offset(mm, 0L);
32006+#else
32007+ pyd = pyd_offset(pgd, 0L);
32008+#endif
32009
32010- for (addr = i = 0; i < PREALLOCATED_PMDS;
32011- i++, pud++, addr += PUD_SIZE) {
32012- pmd_t *pmd = pmds[i];
32013+ for (addr = i = 0; i < PREALLOCATED_PXDS;
32014+ i++, pyd++, addr += PYD_SIZE) {
32015+ pxd_t *pxd = pxds[i];
32016
32017 if (i >= KERNEL_PGD_BOUNDARY)
32018- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32019- sizeof(pmd_t) * PTRS_PER_PMD);
32020+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32021+ sizeof(pxd_t) * PTRS_PER_PMD);
32022
32023- pud_populate(mm, pud, pmd);
32024+ pyd_populate(mm, pyd, pxd);
32025 }
32026 }
32027
32028 pgd_t *pgd_alloc(struct mm_struct *mm)
32029 {
32030 pgd_t *pgd;
32031- pmd_t *pmds[PREALLOCATED_PMDS];
32032+ pxd_t *pxds[PREALLOCATED_PXDS];
32033
32034 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
32035
32036@@ -272,11 +336,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32037
32038 mm->pgd = pgd;
32039
32040- if (preallocate_pmds(pmds) != 0)
32041+ if (preallocate_pxds(pxds) != 0)
32042 goto out_free_pgd;
32043
32044 if (paravirt_pgd_alloc(mm) != 0)
32045- goto out_free_pmds;
32046+ goto out_free_pxds;
32047
32048 /*
32049 * Make sure that pre-populating the pmds is atomic with
32050@@ -286,14 +350,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32051 spin_lock(&pgd_lock);
32052
32053 pgd_ctor(mm, pgd);
32054- pgd_prepopulate_pmd(mm, pgd, pmds);
32055+ pgd_prepopulate_pxd(mm, pgd, pxds);
32056
32057 spin_unlock(&pgd_lock);
32058
32059 return pgd;
32060
32061-out_free_pmds:
32062- free_pmds(pmds);
32063+out_free_pxds:
32064+ free_pxds(pxds);
32065 out_free_pgd:
32066 free_page((unsigned long)pgd);
32067 out:
32068@@ -302,7 +366,7 @@ out:
32069
32070 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
32071 {
32072- pgd_mop_up_pmds(mm, pgd);
32073+ pgd_mop_up_pxds(mm, pgd);
32074 pgd_dtor(pgd);
32075 paravirt_pgd_free(mm, pgd);
32076 free_page((unsigned long)pgd);
32077diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
32078index a69bcb8..19068ab 100644
32079--- a/arch/x86/mm/pgtable_32.c
32080+++ b/arch/x86/mm/pgtable_32.c
32081@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
32082 return;
32083 }
32084 pte = pte_offset_kernel(pmd, vaddr);
32085+
32086+ pax_open_kernel();
32087 if (pte_val(pteval))
32088 set_pte_at(&init_mm, vaddr, pte, pteval);
32089 else
32090 pte_clear(&init_mm, vaddr, pte);
32091+ pax_close_kernel();
32092
32093 /*
32094 * It's enough to flush this one mapping.
32095diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
32096index e666cbb..61788c45 100644
32097--- a/arch/x86/mm/physaddr.c
32098+++ b/arch/x86/mm/physaddr.c
32099@@ -10,7 +10,7 @@
32100 #ifdef CONFIG_X86_64
32101
32102 #ifdef CONFIG_DEBUG_VIRTUAL
32103-unsigned long __phys_addr(unsigned long x)
32104+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32105 {
32106 unsigned long y = x - __START_KERNEL_map;
32107
32108@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
32109 #else
32110
32111 #ifdef CONFIG_DEBUG_VIRTUAL
32112-unsigned long __phys_addr(unsigned long x)
32113+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32114 {
32115 unsigned long phys_addr = x - PAGE_OFFSET;
32116 /* VMALLOC_* aren't constants */
32117diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
32118index 410531d..0f16030 100644
32119--- a/arch/x86/mm/setup_nx.c
32120+++ b/arch/x86/mm/setup_nx.c
32121@@ -5,8 +5,10 @@
32122 #include <asm/pgtable.h>
32123 #include <asm/proto.h>
32124
32125+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32126 static int disable_nx __cpuinitdata;
32127
32128+#ifndef CONFIG_PAX_PAGEEXEC
32129 /*
32130 * noexec = on|off
32131 *
32132@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
32133 return 0;
32134 }
32135 early_param("noexec", noexec_setup);
32136+#endif
32137+
32138+#endif
32139
32140 void __cpuinit x86_configure_nx(void)
32141 {
32142+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32143 if (cpu_has_nx && !disable_nx)
32144 __supported_pte_mask |= _PAGE_NX;
32145 else
32146+#endif
32147 __supported_pte_mask &= ~_PAGE_NX;
32148 }
32149
32150diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
32151index 282375f..e03a98f 100644
32152--- a/arch/x86/mm/tlb.c
32153+++ b/arch/x86/mm/tlb.c
32154@@ -48,7 +48,11 @@ void leave_mm(int cpu)
32155 BUG();
32156 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
32157 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
32158+
32159+#ifndef CONFIG_PAX_PER_CPU_PGD
32160 load_cr3(swapper_pg_dir);
32161+#endif
32162+
32163 }
32164 }
32165 EXPORT_SYMBOL_GPL(leave_mm);
32166diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
32167new file mode 100644
32168index 0000000..dace51c
32169--- /dev/null
32170+++ b/arch/x86/mm/uderef_64.c
32171@@ -0,0 +1,37 @@
32172+#include <linux/mm.h>
32173+#include <asm/pgtable.h>
32174+#include <asm/uaccess.h>
32175+
32176+#ifdef CONFIG_PAX_MEMORY_UDEREF
32177+/* PaX: due to the special call convention these functions must
32178+ * - remain leaf functions under all configurations,
32179+ * - never be called directly, only dereferenced from the wrappers.
32180+ */
32181+void __pax_open_userland(void)
32182+{
32183+ unsigned int cpu;
32184+
32185+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32186+ return;
32187+
32188+ cpu = raw_get_cpu();
32189+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
32190+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
32191+ raw_put_cpu_no_resched();
32192+}
32193+EXPORT_SYMBOL(__pax_open_userland);
32194+
32195+void __pax_close_userland(void)
32196+{
32197+ unsigned int cpu;
32198+
32199+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32200+ return;
32201+
32202+ cpu = raw_get_cpu();
32203+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
32204+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
32205+ raw_put_cpu_no_resched();
32206+}
32207+EXPORT_SYMBOL(__pax_close_userland);
32208+#endif
32209diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
32210index 877b9a1..a8ecf42 100644
32211--- a/arch/x86/net/bpf_jit.S
32212+++ b/arch/x86/net/bpf_jit.S
32213@@ -9,6 +9,7 @@
32214 */
32215 #include <linux/linkage.h>
32216 #include <asm/dwarf2.h>
32217+#include <asm/alternative-asm.h>
32218
32219 /*
32220 * Calling convention :
32221@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
32222 jle bpf_slow_path_word
32223 mov (SKBDATA,%rsi),%eax
32224 bswap %eax /* ntohl() */
32225+ pax_force_retaddr
32226 ret
32227
32228 sk_load_half:
32229@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
32230 jle bpf_slow_path_half
32231 movzwl (SKBDATA,%rsi),%eax
32232 rol $8,%ax # ntohs()
32233+ pax_force_retaddr
32234 ret
32235
32236 sk_load_byte:
32237@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
32238 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
32239 jle bpf_slow_path_byte
32240 movzbl (SKBDATA,%rsi),%eax
32241+ pax_force_retaddr
32242 ret
32243
32244 /**
32245@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
32246 movzbl (SKBDATA,%rsi),%ebx
32247 and $15,%bl
32248 shl $2,%bl
32249+ pax_force_retaddr
32250 ret
32251
32252 /* rsi contains offset and can be scratched */
32253@@ -109,6 +114,7 @@ bpf_slow_path_word:
32254 js bpf_error
32255 mov -12(%rbp),%eax
32256 bswap %eax
32257+ pax_force_retaddr
32258 ret
32259
32260 bpf_slow_path_half:
32261@@ -117,12 +123,14 @@ bpf_slow_path_half:
32262 mov -12(%rbp),%ax
32263 rol $8,%ax
32264 movzwl %ax,%eax
32265+ pax_force_retaddr
32266 ret
32267
32268 bpf_slow_path_byte:
32269 bpf_slow_path_common(1)
32270 js bpf_error
32271 movzbl -12(%rbp),%eax
32272+ pax_force_retaddr
32273 ret
32274
32275 bpf_slow_path_byte_msh:
32276@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
32277 and $15,%al
32278 shl $2,%al
32279 xchg %eax,%ebx
32280+ pax_force_retaddr
32281 ret
32282
32283 #define sk_negative_common(SIZE) \
32284@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
32285 sk_negative_common(4)
32286 mov (%rax), %eax
32287 bswap %eax
32288+ pax_force_retaddr
32289 ret
32290
32291 bpf_slow_path_half_neg:
32292@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
32293 mov (%rax),%ax
32294 rol $8,%ax
32295 movzwl %ax,%eax
32296+ pax_force_retaddr
32297 ret
32298
32299 bpf_slow_path_byte_neg:
32300@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
32301 .globl sk_load_byte_negative_offset
32302 sk_negative_common(1)
32303 movzbl (%rax), %eax
32304+ pax_force_retaddr
32305 ret
32306
32307 bpf_slow_path_byte_msh_neg:
32308@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
32309 and $15,%al
32310 shl $2,%al
32311 xchg %eax,%ebx
32312+ pax_force_retaddr
32313 ret
32314
32315 bpf_error:
32316@@ -197,4 +210,5 @@ bpf_error:
32317 xor %eax,%eax
32318 mov -8(%rbp),%rbx
32319 leaveq
32320+ pax_force_retaddr
32321 ret
32322diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
32323index f66b540..3e88dfb 100644
32324--- a/arch/x86/net/bpf_jit_comp.c
32325+++ b/arch/x86/net/bpf_jit_comp.c
32326@@ -12,6 +12,7 @@
32327 #include <linux/netdevice.h>
32328 #include <linux/filter.h>
32329 #include <linux/if_vlan.h>
32330+#include <linux/random.h>
32331
32332 /*
32333 * Conventions :
32334@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
32335 return ptr + len;
32336 }
32337
32338+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32339+#define MAX_INSTR_CODE_SIZE 96
32340+#else
32341+#define MAX_INSTR_CODE_SIZE 64
32342+#endif
32343+
32344 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
32345
32346 #define EMIT1(b1) EMIT(b1, 1)
32347 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
32348 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
32349 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
32350+
32351+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32352+/* original constant will appear in ecx */
32353+#define DILUTE_CONST_SEQUENCE(_off, _key) \
32354+do { \
32355+ /* mov ecx, randkey */ \
32356+ EMIT1(0xb9); \
32357+ EMIT(_key, 4); \
32358+ /* xor ecx, randkey ^ off */ \
32359+ EMIT2(0x81, 0xf1); \
32360+ EMIT((_key) ^ (_off), 4); \
32361+} while (0)
32362+
32363+#define EMIT1_off32(b1, _off) \
32364+do { \
32365+ switch (b1) { \
32366+ case 0x05: /* add eax, imm32 */ \
32367+ case 0x2d: /* sub eax, imm32 */ \
32368+ case 0x25: /* and eax, imm32 */ \
32369+ case 0x0d: /* or eax, imm32 */ \
32370+ case 0xb8: /* mov eax, imm32 */ \
32371+ case 0x35: /* xor eax, imm32 */ \
32372+ case 0x3d: /* cmp eax, imm32 */ \
32373+ case 0xa9: /* test eax, imm32 */ \
32374+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32375+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
32376+ break; \
32377+ case 0xbb: /* mov ebx, imm32 */ \
32378+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32379+ /* mov ebx, ecx */ \
32380+ EMIT2(0x89, 0xcb); \
32381+ break; \
32382+ case 0xbe: /* mov esi, imm32 */ \
32383+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32384+ /* mov esi, ecx */ \
32385+ EMIT2(0x89, 0xce); \
32386+ break; \
32387+ case 0xe8: /* call rel imm32, always to known funcs */ \
32388+ EMIT1(b1); \
32389+ EMIT(_off, 4); \
32390+ break; \
32391+ case 0xe9: /* jmp rel imm32 */ \
32392+ EMIT1(b1); \
32393+ EMIT(_off, 4); \
32394+ /* prevent fall-through, we're not called if off = 0 */ \
32395+ EMIT(0xcccccccc, 4); \
32396+ EMIT(0xcccccccc, 4); \
32397+ break; \
32398+ default: \
32399+ BUILD_BUG(); \
32400+ } \
32401+} while (0)
32402+
32403+#define EMIT2_off32(b1, b2, _off) \
32404+do { \
32405+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
32406+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
32407+ EMIT(randkey, 4); \
32408+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
32409+ EMIT((_off) - randkey, 4); \
32410+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
32411+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32412+ /* imul eax, ecx */ \
32413+ EMIT3(0x0f, 0xaf, 0xc1); \
32414+ } else { \
32415+ BUILD_BUG(); \
32416+ } \
32417+} while (0)
32418+#else
32419 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
32420+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
32421+#endif
32422
32423 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
32424 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
32425@@ -90,6 +168,24 @@ do { \
32426 #define X86_JBE 0x76
32427 #define X86_JA 0x77
32428
32429+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32430+#define APPEND_FLOW_VERIFY() \
32431+do { \
32432+ /* mov ecx, randkey */ \
32433+ EMIT1(0xb9); \
32434+ EMIT(randkey, 4); \
32435+ /* cmp ecx, randkey */ \
32436+ EMIT2(0x81, 0xf9); \
32437+ EMIT(randkey, 4); \
32438+ /* jz after 8 int 3s */ \
32439+ EMIT2(0x74, 0x08); \
32440+ EMIT(0xcccccccc, 4); \
32441+ EMIT(0xcccccccc, 4); \
32442+} while (0)
32443+#else
32444+#define APPEND_FLOW_VERIFY() do { } while (0)
32445+#endif
32446+
32447 #define EMIT_COND_JMP(op, offset) \
32448 do { \
32449 if (is_near(offset)) \
32450@@ -97,6 +193,7 @@ do { \
32451 else { \
32452 EMIT2(0x0f, op + 0x10); \
32453 EMIT(offset, 4); /* jxx .+off32 */ \
32454+ APPEND_FLOW_VERIFY(); \
32455 } \
32456 } while (0)
32457
32458@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
32459 set_fs(old_fs);
32460 }
32461
32462+struct bpf_jit_work {
32463+ struct work_struct work;
32464+ void *image;
32465+};
32466+
32467 #define CHOOSE_LOAD_FUNC(K, func) \
32468 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
32469
32470@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
32471
32472 void bpf_jit_compile(struct sk_filter *fp)
32473 {
32474- u8 temp[64];
32475+ u8 temp[MAX_INSTR_CODE_SIZE];
32476 u8 *prog;
32477 unsigned int proglen, oldproglen = 0;
32478 int ilen, i;
32479@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
32480 unsigned int *addrs;
32481 const struct sock_filter *filter = fp->insns;
32482 int flen = fp->len;
32483+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32484+ unsigned int randkey;
32485+#endif
32486
32487 if (!bpf_jit_enable)
32488 return;
32489@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
32490 if (addrs == NULL)
32491 return;
32492
32493+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
32494+ if (!fp->work)
32495+ goto out;
32496+
32497+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32498+ randkey = get_random_int();
32499+#endif
32500+
32501 /* Before first pass, make a rough estimation of addrs[]
32502- * each bpf instruction is translated to less than 64 bytes
32503+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
32504 */
32505 for (proglen = 0, i = 0; i < flen; i++) {
32506- proglen += 64;
32507+ proglen += MAX_INSTR_CODE_SIZE;
32508 addrs[i] = proglen;
32509 }
32510 cleanup_addr = proglen; /* epilogue address */
32511@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
32512 case BPF_S_ALU_MUL_K: /* A *= K */
32513 if (is_imm8(K))
32514 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
32515- else {
32516- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
32517- EMIT(K, 4);
32518- }
32519+ else
32520+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
32521 break;
32522 case BPF_S_ALU_DIV_X: /* A /= X; */
32523 seen |= SEEN_XREG;
32524@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
32525 break;
32526 case BPF_S_ALU_MOD_K: /* A %= K; */
32527 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
32528+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32529+ DILUTE_CONST_SEQUENCE(K, randkey);
32530+#else
32531 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
32532+#endif
32533 EMIT2(0xf7, 0xf1); /* div %ecx */
32534 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
32535 break;
32536 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
32537+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32538+ DILUTE_CONST_SEQUENCE(K, randkey);
32539+ // imul rax, rcx
32540+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
32541+#else
32542 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
32543 EMIT(K, 4);
32544+#endif
32545 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
32546 break;
32547 case BPF_S_ALU_AND_X:
32548@@ -602,8 +723,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
32549 if (is_imm8(K)) {
32550 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
32551 } else {
32552- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
32553- EMIT(K, 4);
32554+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
32555 }
32556 } else {
32557 EMIT2(0x89,0xde); /* mov %ebx,%esi */
32558@@ -686,17 +806,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32559 break;
32560 default:
32561 /* hmm, too complex filter, give up with jit compiler */
32562- goto out;
32563+ goto error;
32564 }
32565 ilen = prog - temp;
32566 if (image) {
32567 if (unlikely(proglen + ilen > oldproglen)) {
32568 pr_err("bpb_jit_compile fatal error\n");
32569- kfree(addrs);
32570- module_free(NULL, image);
32571- return;
32572+ module_free_exec(NULL, image);
32573+ goto error;
32574 }
32575+ pax_open_kernel();
32576 memcpy(image + proglen, temp, ilen);
32577+ pax_close_kernel();
32578 }
32579 proglen += ilen;
32580 addrs[i] = proglen;
32581@@ -717,11 +838,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32582 break;
32583 }
32584 if (proglen == oldproglen) {
32585- image = module_alloc(max_t(unsigned int,
32586- proglen,
32587- sizeof(struct work_struct)));
32588+ image = module_alloc_exec(proglen);
32589 if (!image)
32590- goto out;
32591+ goto error;
32592 }
32593 oldproglen = proglen;
32594 }
32595@@ -732,7 +851,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32596 if (image) {
32597 bpf_flush_icache(image, image + proglen);
32598 fp->bpf_func = (void *)image;
32599- }
32600+ } else
32601+error:
32602+ kfree(fp->work);
32603+
32604 out:
32605 kfree(addrs);
32606 return;
32607@@ -740,18 +862,20 @@ out:
32608
32609 static void jit_free_defer(struct work_struct *arg)
32610 {
32611- module_free(NULL, arg);
32612+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
32613+ kfree(arg);
32614 }
32615
32616 /* run from softirq, we must use a work_struct to call
32617- * module_free() from process context
32618+ * module_free_exec() from process context
32619 */
32620 void bpf_jit_free(struct sk_filter *fp)
32621 {
32622 if (fp->bpf_func != sk_run_filter) {
32623- struct work_struct *work = (struct work_struct *)fp->bpf_func;
32624+ struct work_struct *work = &fp->work->work;
32625
32626 INIT_WORK(work, jit_free_defer);
32627+ fp->work->image = fp->bpf_func;
32628 schedule_work(work);
32629 }
32630 }
32631diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
32632index d6aa6e8..266395a 100644
32633--- a/arch/x86/oprofile/backtrace.c
32634+++ b/arch/x86/oprofile/backtrace.c
32635@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
32636 struct stack_frame_ia32 *fp;
32637 unsigned long bytes;
32638
32639- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
32640+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
32641 if (bytes != sizeof(bufhead))
32642 return NULL;
32643
32644- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
32645+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
32646
32647 oprofile_add_trace(bufhead[0].return_address);
32648
32649@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
32650 struct stack_frame bufhead[2];
32651 unsigned long bytes;
32652
32653- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
32654+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
32655 if (bytes != sizeof(bufhead))
32656 return NULL;
32657
32658@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
32659 {
32660 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
32661
32662- if (!user_mode_vm(regs)) {
32663+ if (!user_mode(regs)) {
32664 unsigned long stack = kernel_stack_pointer(regs);
32665 if (depth)
32666 dump_trace(NULL, regs, (unsigned long *)stack, 0,
32667diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
32668index 48768df..ba9143c 100644
32669--- a/arch/x86/oprofile/nmi_int.c
32670+++ b/arch/x86/oprofile/nmi_int.c
32671@@ -23,6 +23,7 @@
32672 #include <asm/nmi.h>
32673 #include <asm/msr.h>
32674 #include <asm/apic.h>
32675+#include <asm/pgtable.h>
32676
32677 #include "op_counter.h"
32678 #include "op_x86_model.h"
32679@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
32680 if (ret)
32681 return ret;
32682
32683- if (!model->num_virt_counters)
32684- model->num_virt_counters = model->num_counters;
32685+ if (!model->num_virt_counters) {
32686+ pax_open_kernel();
32687+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
32688+ pax_close_kernel();
32689+ }
32690
32691 mux_init(ops);
32692
32693diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
32694index b2b9443..be58856 100644
32695--- a/arch/x86/oprofile/op_model_amd.c
32696+++ b/arch/x86/oprofile/op_model_amd.c
32697@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
32698 num_counters = AMD64_NUM_COUNTERS;
32699 }
32700
32701- op_amd_spec.num_counters = num_counters;
32702- op_amd_spec.num_controls = num_counters;
32703- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
32704+ pax_open_kernel();
32705+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
32706+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
32707+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
32708+ pax_close_kernel();
32709
32710 return 0;
32711 }
32712diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
32713index d90528e..0127e2b 100644
32714--- a/arch/x86/oprofile/op_model_ppro.c
32715+++ b/arch/x86/oprofile/op_model_ppro.c
32716@@ -19,6 +19,7 @@
32717 #include <asm/msr.h>
32718 #include <asm/apic.h>
32719 #include <asm/nmi.h>
32720+#include <asm/pgtable.h>
32721
32722 #include "op_x86_model.h"
32723 #include "op_counter.h"
32724@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
32725
32726 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
32727
32728- op_arch_perfmon_spec.num_counters = num_counters;
32729- op_arch_perfmon_spec.num_controls = num_counters;
32730+ pax_open_kernel();
32731+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
32732+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
32733+ pax_close_kernel();
32734 }
32735
32736 static int arch_perfmon_init(struct oprofile_operations *ignore)
32737diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
32738index 71e8a67..6a313bb 100644
32739--- a/arch/x86/oprofile/op_x86_model.h
32740+++ b/arch/x86/oprofile/op_x86_model.h
32741@@ -52,7 +52,7 @@ struct op_x86_model_spec {
32742 void (*switch_ctrl)(struct op_x86_model_spec const *model,
32743 struct op_msrs const * const msrs);
32744 #endif
32745-};
32746+} __do_const;
32747
32748 struct op_counter_config;
32749
32750diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
32751index e9e6ed5..e47ae67 100644
32752--- a/arch/x86/pci/amd_bus.c
32753+++ b/arch/x86/pci/amd_bus.c
32754@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
32755 return NOTIFY_OK;
32756 }
32757
32758-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
32759+static struct notifier_block amd_cpu_notifier = {
32760 .notifier_call = amd_cpu_notify,
32761 };
32762
32763diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
32764index 372e9b8..e775a6c 100644
32765--- a/arch/x86/pci/irq.c
32766+++ b/arch/x86/pci/irq.c
32767@@ -50,7 +50,7 @@ struct irq_router {
32768 struct irq_router_handler {
32769 u16 vendor;
32770 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
32771-};
32772+} __do_const;
32773
32774 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
32775 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
32776@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
32777 return 0;
32778 }
32779
32780-static __initdata struct irq_router_handler pirq_routers[] = {
32781+static __initconst const struct irq_router_handler pirq_routers[] = {
32782 { PCI_VENDOR_ID_INTEL, intel_router_probe },
32783 { PCI_VENDOR_ID_AL, ali_router_probe },
32784 { PCI_VENDOR_ID_ITE, ite_router_probe },
32785@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
32786 static void __init pirq_find_router(struct irq_router *r)
32787 {
32788 struct irq_routing_table *rt = pirq_table;
32789- struct irq_router_handler *h;
32790+ const struct irq_router_handler *h;
32791
32792 #ifdef CONFIG_PCI_BIOS
32793 if (!rt->signature) {
32794@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
32795 return 0;
32796 }
32797
32798-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
32799+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
32800 {
32801 .callback = fix_broken_hp_bios_irq9,
32802 .ident = "HP Pavilion N5400 Series Laptop",
32803diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
32804index 6eb18c4..20d83de 100644
32805--- a/arch/x86/pci/mrst.c
32806+++ b/arch/x86/pci/mrst.c
32807@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
32808 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
32809 pci_mmcfg_late_init();
32810 pcibios_enable_irq = mrst_pci_irq_enable;
32811- pci_root_ops = pci_mrst_ops;
32812+ pax_open_kernel();
32813+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
32814+ pax_close_kernel();
32815 pci_soc_mode = 1;
32816 /* Continue with standard init */
32817 return 1;
32818diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
32819index c77b24a..c979855 100644
32820--- a/arch/x86/pci/pcbios.c
32821+++ b/arch/x86/pci/pcbios.c
32822@@ -79,7 +79,7 @@ union bios32 {
32823 static struct {
32824 unsigned long address;
32825 unsigned short segment;
32826-} bios32_indirect = { 0, __KERNEL_CS };
32827+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
32828
32829 /*
32830 * Returns the entry point for the given service, NULL on error
32831@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
32832 unsigned long length; /* %ecx */
32833 unsigned long entry; /* %edx */
32834 unsigned long flags;
32835+ struct desc_struct d, *gdt;
32836
32837 local_irq_save(flags);
32838- __asm__("lcall *(%%edi); cld"
32839+
32840+ gdt = get_cpu_gdt_table(smp_processor_id());
32841+
32842+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
32843+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32844+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
32845+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32846+
32847+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
32848 : "=a" (return_code),
32849 "=b" (address),
32850 "=c" (length),
32851 "=d" (entry)
32852 : "0" (service),
32853 "1" (0),
32854- "D" (&bios32_indirect));
32855+ "D" (&bios32_indirect),
32856+ "r"(__PCIBIOS_DS)
32857+ : "memory");
32858+
32859+ pax_open_kernel();
32860+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
32861+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
32862+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
32863+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
32864+ pax_close_kernel();
32865+
32866 local_irq_restore(flags);
32867
32868 switch (return_code) {
32869- case 0:
32870- return address + entry;
32871- case 0x80: /* Not present */
32872- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32873- return 0;
32874- default: /* Shouldn't happen */
32875- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32876- service, return_code);
32877+ case 0: {
32878+ int cpu;
32879+ unsigned char flags;
32880+
32881+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
32882+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
32883+ printk(KERN_WARNING "bios32_service: not valid\n");
32884 return 0;
32885+ }
32886+ address = address + PAGE_OFFSET;
32887+ length += 16UL; /* some BIOSs underreport this... */
32888+ flags = 4;
32889+ if (length >= 64*1024*1024) {
32890+ length >>= PAGE_SHIFT;
32891+ flags |= 8;
32892+ }
32893+
32894+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32895+ gdt = get_cpu_gdt_table(cpu);
32896+ pack_descriptor(&d, address, length, 0x9b, flags);
32897+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32898+ pack_descriptor(&d, address, length, 0x93, flags);
32899+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32900+ }
32901+ return entry;
32902+ }
32903+ case 0x80: /* Not present */
32904+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32905+ return 0;
32906+ default: /* Shouldn't happen */
32907+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32908+ service, return_code);
32909+ return 0;
32910 }
32911 }
32912
32913 static struct {
32914 unsigned long address;
32915 unsigned short segment;
32916-} pci_indirect = { 0, __KERNEL_CS };
32917+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
32918
32919-static int pci_bios_present;
32920+static int pci_bios_present __read_only;
32921
32922 static int check_pcibios(void)
32923 {
32924@@ -131,11 +174,13 @@ static int check_pcibios(void)
32925 unsigned long flags, pcibios_entry;
32926
32927 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
32928- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
32929+ pci_indirect.address = pcibios_entry;
32930
32931 local_irq_save(flags);
32932- __asm__(
32933- "lcall *(%%edi); cld\n\t"
32934+ __asm__("movw %w6, %%ds\n\t"
32935+ "lcall *%%ss:(%%edi); cld\n\t"
32936+ "push %%ss\n\t"
32937+ "pop %%ds\n\t"
32938 "jc 1f\n\t"
32939 "xor %%ah, %%ah\n"
32940 "1:"
32941@@ -144,7 +189,8 @@ static int check_pcibios(void)
32942 "=b" (ebx),
32943 "=c" (ecx)
32944 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
32945- "D" (&pci_indirect)
32946+ "D" (&pci_indirect),
32947+ "r" (__PCIBIOS_DS)
32948 : "memory");
32949 local_irq_restore(flags);
32950
32951@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32952
32953 switch (len) {
32954 case 1:
32955- __asm__("lcall *(%%esi); cld\n\t"
32956+ __asm__("movw %w6, %%ds\n\t"
32957+ "lcall *%%ss:(%%esi); cld\n\t"
32958+ "push %%ss\n\t"
32959+ "pop %%ds\n\t"
32960 "jc 1f\n\t"
32961 "xor %%ah, %%ah\n"
32962 "1:"
32963@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32964 : "1" (PCIBIOS_READ_CONFIG_BYTE),
32965 "b" (bx),
32966 "D" ((long)reg),
32967- "S" (&pci_indirect));
32968+ "S" (&pci_indirect),
32969+ "r" (__PCIBIOS_DS));
32970 /*
32971 * Zero-extend the result beyond 8 bits, do not trust the
32972 * BIOS having done it:
32973@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32974 *value &= 0xff;
32975 break;
32976 case 2:
32977- __asm__("lcall *(%%esi); cld\n\t"
32978+ __asm__("movw %w6, %%ds\n\t"
32979+ "lcall *%%ss:(%%esi); cld\n\t"
32980+ "push %%ss\n\t"
32981+ "pop %%ds\n\t"
32982 "jc 1f\n\t"
32983 "xor %%ah, %%ah\n"
32984 "1:"
32985@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32986 : "1" (PCIBIOS_READ_CONFIG_WORD),
32987 "b" (bx),
32988 "D" ((long)reg),
32989- "S" (&pci_indirect));
32990+ "S" (&pci_indirect),
32991+ "r" (__PCIBIOS_DS));
32992 /*
32993 * Zero-extend the result beyond 16 bits, do not trust the
32994 * BIOS having done it:
32995@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32996 *value &= 0xffff;
32997 break;
32998 case 4:
32999- __asm__("lcall *(%%esi); cld\n\t"
33000+ __asm__("movw %w6, %%ds\n\t"
33001+ "lcall *%%ss:(%%esi); cld\n\t"
33002+ "push %%ss\n\t"
33003+ "pop %%ds\n\t"
33004 "jc 1f\n\t"
33005 "xor %%ah, %%ah\n"
33006 "1:"
33007@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33008 : "1" (PCIBIOS_READ_CONFIG_DWORD),
33009 "b" (bx),
33010 "D" ((long)reg),
33011- "S" (&pci_indirect));
33012+ "S" (&pci_indirect),
33013+ "r" (__PCIBIOS_DS));
33014 break;
33015 }
33016
33017@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33018
33019 switch (len) {
33020 case 1:
33021- __asm__("lcall *(%%esi); cld\n\t"
33022+ __asm__("movw %w6, %%ds\n\t"
33023+ "lcall *%%ss:(%%esi); cld\n\t"
33024+ "push %%ss\n\t"
33025+ "pop %%ds\n\t"
33026 "jc 1f\n\t"
33027 "xor %%ah, %%ah\n"
33028 "1:"
33029@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33030 "c" (value),
33031 "b" (bx),
33032 "D" ((long)reg),
33033- "S" (&pci_indirect));
33034+ "S" (&pci_indirect),
33035+ "r" (__PCIBIOS_DS));
33036 break;
33037 case 2:
33038- __asm__("lcall *(%%esi); cld\n\t"
33039+ __asm__("movw %w6, %%ds\n\t"
33040+ "lcall *%%ss:(%%esi); cld\n\t"
33041+ "push %%ss\n\t"
33042+ "pop %%ds\n\t"
33043 "jc 1f\n\t"
33044 "xor %%ah, %%ah\n"
33045 "1:"
33046@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33047 "c" (value),
33048 "b" (bx),
33049 "D" ((long)reg),
33050- "S" (&pci_indirect));
33051+ "S" (&pci_indirect),
33052+ "r" (__PCIBIOS_DS));
33053 break;
33054 case 4:
33055- __asm__("lcall *(%%esi); cld\n\t"
33056+ __asm__("movw %w6, %%ds\n\t"
33057+ "lcall *%%ss:(%%esi); cld\n\t"
33058+ "push %%ss\n\t"
33059+ "pop %%ds\n\t"
33060 "jc 1f\n\t"
33061 "xor %%ah, %%ah\n"
33062 "1:"
33063@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33064 "c" (value),
33065 "b" (bx),
33066 "D" ((long)reg),
33067- "S" (&pci_indirect));
33068+ "S" (&pci_indirect),
33069+ "r" (__PCIBIOS_DS));
33070 break;
33071 }
33072
33073@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33074
33075 DBG("PCI: Fetching IRQ routing table... ");
33076 __asm__("push %%es\n\t"
33077+ "movw %w8, %%ds\n\t"
33078 "push %%ds\n\t"
33079 "pop %%es\n\t"
33080- "lcall *(%%esi); cld\n\t"
33081+ "lcall *%%ss:(%%esi); cld\n\t"
33082 "pop %%es\n\t"
33083+ "push %%ss\n\t"
33084+ "pop %%ds\n"
33085 "jc 1f\n\t"
33086 "xor %%ah, %%ah\n"
33087 "1:"
33088@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33089 "1" (0),
33090 "D" ((long) &opt),
33091 "S" (&pci_indirect),
33092- "m" (opt)
33093+ "m" (opt),
33094+ "r" (__PCIBIOS_DS)
33095 : "memory");
33096 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
33097 if (ret & 0xff00)
33098@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33099 {
33100 int ret;
33101
33102- __asm__("lcall *(%%esi); cld\n\t"
33103+ __asm__("movw %w5, %%ds\n\t"
33104+ "lcall *%%ss:(%%esi); cld\n\t"
33105+ "push %%ss\n\t"
33106+ "pop %%ds\n"
33107 "jc 1f\n\t"
33108 "xor %%ah, %%ah\n"
33109 "1:"
33110@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33111 : "0" (PCIBIOS_SET_PCI_HW_INT),
33112 "b" ((dev->bus->number << 8) | dev->devfn),
33113 "c" ((irq << 8) | (pin + 10)),
33114- "S" (&pci_indirect));
33115+ "S" (&pci_indirect),
33116+ "r" (__PCIBIOS_DS));
33117 return !(ret & 0xff00);
33118 }
33119 EXPORT_SYMBOL(pcibios_set_irq_routing);
33120diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
33121index 40e4469..d915bf9 100644
33122--- a/arch/x86/platform/efi/efi_32.c
33123+++ b/arch/x86/platform/efi/efi_32.c
33124@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
33125 {
33126 struct desc_ptr gdt_descr;
33127
33128+#ifdef CONFIG_PAX_KERNEXEC
33129+ struct desc_struct d;
33130+#endif
33131+
33132 local_irq_save(efi_rt_eflags);
33133
33134 load_cr3(initial_page_table);
33135 __flush_tlb_all();
33136
33137+#ifdef CONFIG_PAX_KERNEXEC
33138+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
33139+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33140+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
33141+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33142+#endif
33143+
33144 gdt_descr.address = __pa(get_cpu_gdt_table(0));
33145 gdt_descr.size = GDT_SIZE - 1;
33146 load_gdt(&gdt_descr);
33147@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
33148 {
33149 struct desc_ptr gdt_descr;
33150
33151+#ifdef CONFIG_PAX_KERNEXEC
33152+ struct desc_struct d;
33153+
33154+ memset(&d, 0, sizeof d);
33155+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33156+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33157+#endif
33158+
33159 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
33160 gdt_descr.size = GDT_SIZE - 1;
33161 load_gdt(&gdt_descr);
33162
33163+#ifdef CONFIG_PAX_PER_CPU_PGD
33164+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33165+#else
33166 load_cr3(swapper_pg_dir);
33167+#endif
33168+
33169 __flush_tlb_all();
33170
33171 local_irq_restore(efi_rt_eflags);
33172diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
33173index 39a0e7f1..872396e 100644
33174--- a/arch/x86/platform/efi/efi_64.c
33175+++ b/arch/x86/platform/efi/efi_64.c
33176@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
33177 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
33178 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
33179 }
33180+
33181+#ifdef CONFIG_PAX_PER_CPU_PGD
33182+ load_cr3(swapper_pg_dir);
33183+#endif
33184+
33185 __flush_tlb_all();
33186 }
33187
33188@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
33189 for (pgd = 0; pgd < n_pgds; pgd++)
33190 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
33191 kfree(save_pgd);
33192+
33193+#ifdef CONFIG_PAX_PER_CPU_PGD
33194+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33195+#endif
33196+
33197 __flush_tlb_all();
33198 local_irq_restore(efi_flags);
33199 early_code_mapping_set_exec(0);
33200diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
33201index fbe66e6..eae5e38 100644
33202--- a/arch/x86/platform/efi/efi_stub_32.S
33203+++ b/arch/x86/platform/efi/efi_stub_32.S
33204@@ -6,7 +6,9 @@
33205 */
33206
33207 #include <linux/linkage.h>
33208+#include <linux/init.h>
33209 #include <asm/page_types.h>
33210+#include <asm/segment.h>
33211
33212 /*
33213 * efi_call_phys(void *, ...) is a function with variable parameters.
33214@@ -20,7 +22,7 @@
33215 * service functions will comply with gcc calling convention, too.
33216 */
33217
33218-.text
33219+__INIT
33220 ENTRY(efi_call_phys)
33221 /*
33222 * 0. The function can only be called in Linux kernel. So CS has been
33223@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
33224 * The mapping of lower virtual memory has been created in prelog and
33225 * epilog.
33226 */
33227- movl $1f, %edx
33228- subl $__PAGE_OFFSET, %edx
33229- jmp *%edx
33230+#ifdef CONFIG_PAX_KERNEXEC
33231+ movl $(__KERNEXEC_EFI_DS), %edx
33232+ mov %edx, %ds
33233+ mov %edx, %es
33234+ mov %edx, %ss
33235+ addl $2f,(1f)
33236+ ljmp *(1f)
33237+
33238+__INITDATA
33239+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
33240+.previous
33241+
33242+2:
33243+ subl $2b,(1b)
33244+#else
33245+ jmp 1f-__PAGE_OFFSET
33246 1:
33247+#endif
33248
33249 /*
33250 * 2. Now on the top of stack is the return
33251@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
33252 * parameter 2, ..., param n. To make things easy, we save the return
33253 * address of efi_call_phys in a global variable.
33254 */
33255- popl %edx
33256- movl %edx, saved_return_addr
33257- /* get the function pointer into ECX*/
33258- popl %ecx
33259- movl %ecx, efi_rt_function_ptr
33260- movl $2f, %edx
33261- subl $__PAGE_OFFSET, %edx
33262- pushl %edx
33263+ popl (saved_return_addr)
33264+ popl (efi_rt_function_ptr)
33265
33266 /*
33267 * 3. Clear PG bit in %CR0.
33268@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
33269 /*
33270 * 5. Call the physical function.
33271 */
33272- jmp *%ecx
33273+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
33274
33275-2:
33276 /*
33277 * 6. After EFI runtime service returns, control will return to
33278 * following instruction. We'd better readjust stack pointer first.
33279@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
33280 movl %cr0, %edx
33281 orl $0x80000000, %edx
33282 movl %edx, %cr0
33283- jmp 1f
33284-1:
33285+
33286 /*
33287 * 8. Now restore the virtual mode from flat mode by
33288 * adding EIP with PAGE_OFFSET.
33289 */
33290- movl $1f, %edx
33291- jmp *%edx
33292+#ifdef CONFIG_PAX_KERNEXEC
33293+ movl $(__KERNEL_DS), %edx
33294+ mov %edx, %ds
33295+ mov %edx, %es
33296+ mov %edx, %ss
33297+ ljmp $(__KERNEL_CS),$1f
33298+#else
33299+ jmp 1f+__PAGE_OFFSET
33300+#endif
33301 1:
33302
33303 /*
33304 * 9. Balance the stack. And because EAX contain the return value,
33305 * we'd better not clobber it.
33306 */
33307- leal efi_rt_function_ptr, %edx
33308- movl (%edx), %ecx
33309- pushl %ecx
33310+ pushl (efi_rt_function_ptr)
33311
33312 /*
33313- * 10. Push the saved return address onto the stack and return.
33314+ * 10. Return to the saved return address.
33315 */
33316- leal saved_return_addr, %edx
33317- movl (%edx), %ecx
33318- pushl %ecx
33319- ret
33320+ jmpl *(saved_return_addr)
33321 ENDPROC(efi_call_phys)
33322 .previous
33323
33324-.data
33325+__INITDATA
33326 saved_return_addr:
33327 .long 0
33328 efi_rt_function_ptr:
33329diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
33330index 4c07cca..2c8427d 100644
33331--- a/arch/x86/platform/efi/efi_stub_64.S
33332+++ b/arch/x86/platform/efi/efi_stub_64.S
33333@@ -7,6 +7,7 @@
33334 */
33335
33336 #include <linux/linkage.h>
33337+#include <asm/alternative-asm.h>
33338
33339 #define SAVE_XMM \
33340 mov %rsp, %rax; \
33341@@ -40,6 +41,7 @@ ENTRY(efi_call0)
33342 call *%rdi
33343 addq $32, %rsp
33344 RESTORE_XMM
33345+ pax_force_retaddr 0, 1
33346 ret
33347 ENDPROC(efi_call0)
33348
33349@@ -50,6 +52,7 @@ ENTRY(efi_call1)
33350 call *%rdi
33351 addq $32, %rsp
33352 RESTORE_XMM
33353+ pax_force_retaddr 0, 1
33354 ret
33355 ENDPROC(efi_call1)
33356
33357@@ -60,6 +63,7 @@ ENTRY(efi_call2)
33358 call *%rdi
33359 addq $32, %rsp
33360 RESTORE_XMM
33361+ pax_force_retaddr 0, 1
33362 ret
33363 ENDPROC(efi_call2)
33364
33365@@ -71,6 +75,7 @@ ENTRY(efi_call3)
33366 call *%rdi
33367 addq $32, %rsp
33368 RESTORE_XMM
33369+ pax_force_retaddr 0, 1
33370 ret
33371 ENDPROC(efi_call3)
33372
33373@@ -83,6 +88,7 @@ ENTRY(efi_call4)
33374 call *%rdi
33375 addq $32, %rsp
33376 RESTORE_XMM
33377+ pax_force_retaddr 0, 1
33378 ret
33379 ENDPROC(efi_call4)
33380
33381@@ -96,6 +102,7 @@ ENTRY(efi_call5)
33382 call *%rdi
33383 addq $48, %rsp
33384 RESTORE_XMM
33385+ pax_force_retaddr 0, 1
33386 ret
33387 ENDPROC(efi_call5)
33388
33389@@ -112,5 +119,6 @@ ENTRY(efi_call6)
33390 call *%rdi
33391 addq $48, %rsp
33392 RESTORE_XMM
33393+ pax_force_retaddr 0, 1
33394 ret
33395 ENDPROC(efi_call6)
33396diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
33397index a0a0a43..a48e233 100644
33398--- a/arch/x86/platform/mrst/mrst.c
33399+++ b/arch/x86/platform/mrst/mrst.c
33400@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
33401 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
33402 int sfi_mrtc_num;
33403
33404-static void mrst_power_off(void)
33405+static __noreturn void mrst_power_off(void)
33406 {
33407+ BUG();
33408 }
33409
33410-static void mrst_reboot(void)
33411+static __noreturn void mrst_reboot(void)
33412 {
33413 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
33414+ BUG();
33415 }
33416
33417 /* parse all the mtimer info to a static mtimer array */
33418diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
33419index d6ee929..3637cb5 100644
33420--- a/arch/x86/platform/olpc/olpc_dt.c
33421+++ b/arch/x86/platform/olpc/olpc_dt.c
33422@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
33423 return res;
33424 }
33425
33426-static struct of_pdt_ops prom_olpc_ops __initdata = {
33427+static struct of_pdt_ops prom_olpc_ops __initconst = {
33428 .nextprop = olpc_dt_nextprop,
33429 .getproplen = olpc_dt_getproplen,
33430 .getproperty = olpc_dt_getproperty,
33431diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
33432index 1cf5b30..fd45732 100644
33433--- a/arch/x86/power/cpu.c
33434+++ b/arch/x86/power/cpu.c
33435@@ -137,11 +137,8 @@ static void do_fpu_end(void)
33436 static void fix_processor_context(void)
33437 {
33438 int cpu = smp_processor_id();
33439- struct tss_struct *t = &per_cpu(init_tss, cpu);
33440-#ifdef CONFIG_X86_64
33441- struct desc_struct *desc = get_cpu_gdt_table(cpu);
33442- tss_desc tss;
33443-#endif
33444+ struct tss_struct *t = init_tss + cpu;
33445+
33446 set_tss_desc(cpu, t); /*
33447 * This just modifies memory; should not be
33448 * necessary. But... This is necessary, because
33449@@ -150,10 +147,6 @@ static void fix_processor_context(void)
33450 */
33451
33452 #ifdef CONFIG_X86_64
33453- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
33454- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
33455- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
33456-
33457 syscall_init(); /* This sets MSR_*STAR and related */
33458 #endif
33459 load_TR_desc(); /* This does ltr */
33460diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
33461index a44f457..9140171 100644
33462--- a/arch/x86/realmode/init.c
33463+++ b/arch/x86/realmode/init.c
33464@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
33465 __va(real_mode_header->trampoline_header);
33466
33467 #ifdef CONFIG_X86_32
33468- trampoline_header->start = __pa_symbol(startup_32_smp);
33469+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
33470+
33471+#ifdef CONFIG_PAX_KERNEXEC
33472+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
33473+#endif
33474+
33475+ trampoline_header->boot_cs = __BOOT_CS;
33476 trampoline_header->gdt_limit = __BOOT_DS + 7;
33477 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
33478 #else
33479@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
33480 *trampoline_cr4_features = read_cr4();
33481
33482 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
33483- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
33484+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
33485 trampoline_pgd[511] = init_level4_pgt[511].pgd;
33486 #endif
33487 }
33488diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
33489index 8869287..d577672 100644
33490--- a/arch/x86/realmode/rm/Makefile
33491+++ b/arch/x86/realmode/rm/Makefile
33492@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
33493 $(call cc-option, -fno-unit-at-a-time)) \
33494 $(call cc-option, -fno-stack-protector) \
33495 $(call cc-option, -mpreferred-stack-boundary=2)
33496+ifdef CONSTIFY_PLUGIN
33497+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
33498+endif
33499 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
33500 GCOV_PROFILE := n
33501diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
33502index a28221d..93c40f1 100644
33503--- a/arch/x86/realmode/rm/header.S
33504+++ b/arch/x86/realmode/rm/header.S
33505@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
33506 #endif
33507 /* APM/BIOS reboot */
33508 .long pa_machine_real_restart_asm
33509-#ifdef CONFIG_X86_64
33510+#ifdef CONFIG_X86_32
33511+ .long __KERNEL_CS
33512+#else
33513 .long __KERNEL32_CS
33514 #endif
33515 END(real_mode_header)
33516diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
33517index c1b2791..f9e31c7 100644
33518--- a/arch/x86/realmode/rm/trampoline_32.S
33519+++ b/arch/x86/realmode/rm/trampoline_32.S
33520@@ -25,6 +25,12 @@
33521 #include <asm/page_types.h>
33522 #include "realmode.h"
33523
33524+#ifdef CONFIG_PAX_KERNEXEC
33525+#define ta(X) (X)
33526+#else
33527+#define ta(X) (pa_ ## X)
33528+#endif
33529+
33530 .text
33531 .code16
33532
33533@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
33534
33535 cli # We should be safe anyway
33536
33537- movl tr_start, %eax # where we need to go
33538-
33539 movl $0xA5A5A5A5, trampoline_status
33540 # write marker for master knows we're running
33541
33542@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
33543 movw $1, %dx # protected mode (PE) bit
33544 lmsw %dx # into protected mode
33545
33546- ljmpl $__BOOT_CS, $pa_startup_32
33547+ ljmpl *(trampoline_header)
33548
33549 .section ".text32","ax"
33550 .code32
33551@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
33552 .balign 8
33553 GLOBAL(trampoline_header)
33554 tr_start: .space 4
33555- tr_gdt_pad: .space 2
33556+ tr_boot_cs: .space 2
33557 tr_gdt: .space 6
33558 END(trampoline_header)
33559
33560diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
33561index bb360dc..d0fd8f8 100644
33562--- a/arch/x86/realmode/rm/trampoline_64.S
33563+++ b/arch/x86/realmode/rm/trampoline_64.S
33564@@ -94,6 +94,7 @@ ENTRY(startup_32)
33565 movl %edx, %gs
33566
33567 movl pa_tr_cr4, %eax
33568+ andl $~X86_CR4_PCIDE, %eax
33569 movl %eax, %cr4 # Enable PAE mode
33570
33571 # Setup trampoline 4 level pagetables
33572@@ -107,7 +108,7 @@ ENTRY(startup_32)
33573 wrmsr
33574
33575 # Enable paging and in turn activate Long Mode
33576- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
33577+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
33578 movl %eax, %cr0
33579
33580 /*
33581diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
33582index e812034..c747134 100644
33583--- a/arch/x86/tools/Makefile
33584+++ b/arch/x86/tools/Makefile
33585@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
33586
33587 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
33588
33589-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
33590+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
33591 hostprogs-y += relocs
33592 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
33593 relocs: $(obj)/relocs
33594diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
33595index f7bab68..b6d9886 100644
33596--- a/arch/x86/tools/relocs.c
33597+++ b/arch/x86/tools/relocs.c
33598@@ -1,5 +1,7 @@
33599 /* This is included from relocs_32/64.c */
33600
33601+#include "../../../include/generated/autoconf.h"
33602+
33603 #define ElfW(type) _ElfW(ELF_BITS, type)
33604 #define _ElfW(bits, type) __ElfW(bits, type)
33605 #define __ElfW(bits, type) Elf##bits##_##type
33606@@ -11,6 +13,7 @@
33607 #define Elf_Sym ElfW(Sym)
33608
33609 static Elf_Ehdr ehdr;
33610+static Elf_Phdr *phdr;
33611
33612 struct relocs {
33613 uint32_t *offset;
33614@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
33615 }
33616 }
33617
33618+static void read_phdrs(FILE *fp)
33619+{
33620+ unsigned int i;
33621+
33622+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
33623+ if (!phdr) {
33624+ die("Unable to allocate %d program headers\n",
33625+ ehdr.e_phnum);
33626+ }
33627+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
33628+ die("Seek to %d failed: %s\n",
33629+ ehdr.e_phoff, strerror(errno));
33630+ }
33631+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
33632+ die("Cannot read ELF program headers: %s\n",
33633+ strerror(errno));
33634+ }
33635+ for(i = 0; i < ehdr.e_phnum; i++) {
33636+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
33637+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
33638+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
33639+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
33640+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
33641+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
33642+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
33643+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
33644+ }
33645+
33646+}
33647+
33648 static void read_shdrs(FILE *fp)
33649 {
33650- int i;
33651+ unsigned int i;
33652 Elf_Shdr shdr;
33653
33654 secs = calloc(ehdr.e_shnum, sizeof(struct section));
33655@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
33656
33657 static void read_strtabs(FILE *fp)
33658 {
33659- int i;
33660+ unsigned int i;
33661 for (i = 0; i < ehdr.e_shnum; i++) {
33662 struct section *sec = &secs[i];
33663 if (sec->shdr.sh_type != SHT_STRTAB) {
33664@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
33665
33666 static void read_symtabs(FILE *fp)
33667 {
33668- int i,j;
33669+ unsigned int i,j;
33670 for (i = 0; i < ehdr.e_shnum; i++) {
33671 struct section *sec = &secs[i];
33672 if (sec->shdr.sh_type != SHT_SYMTAB) {
33673@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
33674 }
33675
33676
33677-static void read_relocs(FILE *fp)
33678+static void read_relocs(FILE *fp, int use_real_mode)
33679 {
33680- int i,j;
33681+ unsigned int i,j;
33682+ uint32_t base;
33683+
33684 for (i = 0; i < ehdr.e_shnum; i++) {
33685 struct section *sec = &secs[i];
33686 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33687@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
33688 die("Cannot read symbol table: %s\n",
33689 strerror(errno));
33690 }
33691+ base = 0;
33692+
33693+#ifdef CONFIG_X86_32
33694+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
33695+ if (phdr[j].p_type != PT_LOAD )
33696+ continue;
33697+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
33698+ continue;
33699+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
33700+ break;
33701+ }
33702+#endif
33703+
33704 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
33705 Elf_Rel *rel = &sec->reltab[j];
33706- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
33707+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
33708 rel->r_info = elf_xword_to_cpu(rel->r_info);
33709 #if (SHT_REL_TYPE == SHT_RELA)
33710 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
33711@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
33712
33713 static void print_absolute_symbols(void)
33714 {
33715- int i;
33716+ unsigned int i;
33717 const char *format;
33718
33719 if (ELF_BITS == 64)
33720@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
33721 for (i = 0; i < ehdr.e_shnum; i++) {
33722 struct section *sec = &secs[i];
33723 char *sym_strtab;
33724- int j;
33725+ unsigned int j;
33726
33727 if (sec->shdr.sh_type != SHT_SYMTAB) {
33728 continue;
33729@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
33730
33731 static void print_absolute_relocs(void)
33732 {
33733- int i, printed = 0;
33734+ unsigned int i, printed = 0;
33735 const char *format;
33736
33737 if (ELF_BITS == 64)
33738@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
33739 struct section *sec_applies, *sec_symtab;
33740 char *sym_strtab;
33741 Elf_Sym *sh_symtab;
33742- int j;
33743+ unsigned int j;
33744 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33745 continue;
33746 }
33747@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
33748 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
33749 Elf_Sym *sym, const char *symname))
33750 {
33751- int i;
33752+ unsigned int i;
33753 /* Walk through the relocations */
33754 for (i = 0; i < ehdr.e_shnum; i++) {
33755 char *sym_strtab;
33756 Elf_Sym *sh_symtab;
33757 struct section *sec_applies, *sec_symtab;
33758- int j;
33759+ unsigned int j;
33760 struct section *sec = &secs[i];
33761
33762 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33763@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
33764 {
33765 unsigned r_type = ELF32_R_TYPE(rel->r_info);
33766 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
33767+ char *sym_strtab = sec->link->link->strtab;
33768+
33769+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
33770+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
33771+ return 0;
33772+
33773+#ifdef CONFIG_PAX_KERNEXEC
33774+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
33775+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
33776+ return 0;
33777+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
33778+ return 0;
33779+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
33780+ return 0;
33781+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
33782+ return 0;
33783+#endif
33784
33785 switch (r_type) {
33786 case R_386_NONE:
33787@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
33788
33789 static void emit_relocs(int as_text, int use_real_mode)
33790 {
33791- int i;
33792+ unsigned int i;
33793 int (*write_reloc)(uint32_t, FILE *) = write32;
33794 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
33795 const char *symname);
33796@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
33797 {
33798 regex_init(use_real_mode);
33799 read_ehdr(fp);
33800+ read_phdrs(fp);
33801 read_shdrs(fp);
33802 read_strtabs(fp);
33803 read_symtabs(fp);
33804- read_relocs(fp);
33805+ read_relocs(fp, use_real_mode);
33806 if (ELF_BITS == 64)
33807 percpu_init();
33808 if (show_absolute_syms) {
33809diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
33810index 80ffa5b..a33bd15 100644
33811--- a/arch/x86/um/tls_32.c
33812+++ b/arch/x86/um/tls_32.c
33813@@ -260,7 +260,7 @@ out:
33814 if (unlikely(task == current &&
33815 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
33816 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
33817- "without flushed TLS.", current->pid);
33818+ "without flushed TLS.", task_pid_nr(current));
33819 }
33820
33821 return 0;
33822diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
33823index fd14be1..e3c79c0 100644
33824--- a/arch/x86/vdso/Makefile
33825+++ b/arch/x86/vdso/Makefile
33826@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
33827 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
33828 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
33829
33830-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33831+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33832 GCOV_PROFILE := n
33833
33834 #
33835diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
33836index 0faad64..39ef157 100644
33837--- a/arch/x86/vdso/vdso32-setup.c
33838+++ b/arch/x86/vdso/vdso32-setup.c
33839@@ -25,6 +25,7 @@
33840 #include <asm/tlbflush.h>
33841 #include <asm/vdso.h>
33842 #include <asm/proto.h>
33843+#include <asm/mman.h>
33844
33845 enum {
33846 VDSO_DISABLED = 0,
33847@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
33848 void enable_sep_cpu(void)
33849 {
33850 int cpu = get_cpu();
33851- struct tss_struct *tss = &per_cpu(init_tss, cpu);
33852+ struct tss_struct *tss = init_tss + cpu;
33853
33854 if (!boot_cpu_has(X86_FEATURE_SEP)) {
33855 put_cpu();
33856@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
33857 gate_vma.vm_start = FIXADDR_USER_START;
33858 gate_vma.vm_end = FIXADDR_USER_END;
33859 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
33860- gate_vma.vm_page_prot = __P101;
33861+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
33862
33863 return 0;
33864 }
33865@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33866 if (compat)
33867 addr = VDSO_HIGH_BASE;
33868 else {
33869- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
33870+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
33871 if (IS_ERR_VALUE(addr)) {
33872 ret = addr;
33873 goto up_fail;
33874 }
33875 }
33876
33877- current->mm->context.vdso = (void *)addr;
33878+ current->mm->context.vdso = addr;
33879
33880 if (compat_uses_vma || !compat) {
33881 /*
33882@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33883 }
33884
33885 current_thread_info()->sysenter_return =
33886- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33887+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33888
33889 up_fail:
33890 if (ret)
33891- current->mm->context.vdso = NULL;
33892+ current->mm->context.vdso = 0;
33893
33894 up_write(&mm->mmap_sem);
33895
33896@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
33897
33898 const char *arch_vma_name(struct vm_area_struct *vma)
33899 {
33900- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
33901+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
33902 return "[vdso]";
33903+
33904+#ifdef CONFIG_PAX_SEGMEXEC
33905+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
33906+ return "[vdso]";
33907+#endif
33908+
33909 return NULL;
33910 }
33911
33912@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
33913 * Check to see if the corresponding task was created in compat vdso
33914 * mode.
33915 */
33916- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
33917+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
33918 return &gate_vma;
33919 return NULL;
33920 }
33921diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
33922index 431e875..cbb23f3 100644
33923--- a/arch/x86/vdso/vma.c
33924+++ b/arch/x86/vdso/vma.c
33925@@ -16,8 +16,6 @@
33926 #include <asm/vdso.h>
33927 #include <asm/page.h>
33928
33929-unsigned int __read_mostly vdso_enabled = 1;
33930-
33931 extern char vdso_start[], vdso_end[];
33932 extern unsigned short vdso_sync_cpuid;
33933
33934@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
33935 * unaligned here as a result of stack start randomization.
33936 */
33937 addr = PAGE_ALIGN(addr);
33938- addr = align_vdso_addr(addr);
33939
33940 return addr;
33941 }
33942@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
33943 unsigned size)
33944 {
33945 struct mm_struct *mm = current->mm;
33946- unsigned long addr;
33947+ unsigned long addr = 0;
33948 int ret;
33949
33950- if (!vdso_enabled)
33951- return 0;
33952-
33953 down_write(&mm->mmap_sem);
33954+
33955+#ifdef CONFIG_PAX_RANDMMAP
33956+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
33957+#endif
33958+
33959 addr = vdso_addr(mm->start_stack, size);
33960+ addr = align_vdso_addr(addr);
33961 addr = get_unmapped_area(NULL, addr, size, 0, 0);
33962 if (IS_ERR_VALUE(addr)) {
33963 ret = addr;
33964 goto up_fail;
33965 }
33966
33967- current->mm->context.vdso = (void *)addr;
33968+ mm->context.vdso = addr;
33969
33970 ret = install_special_mapping(mm, addr, size,
33971 VM_READ|VM_EXEC|
33972 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
33973 pages);
33974- if (ret) {
33975- current->mm->context.vdso = NULL;
33976- goto up_fail;
33977- }
33978+ if (ret)
33979+ mm->context.vdso = 0;
33980
33981 up_fail:
33982 up_write(&mm->mmap_sem);
33983@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33984 vdsox32_size);
33985 }
33986 #endif
33987-
33988-static __init int vdso_setup(char *s)
33989-{
33990- vdso_enabled = simple_strtoul(s, NULL, 0);
33991- return 0;
33992-}
33993-__setup("vdso=", vdso_setup);
33994diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
33995index a492be2..08678da 100644
33996--- a/arch/x86/xen/enlighten.c
33997+++ b/arch/x86/xen/enlighten.c
33998@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
33999
34000 struct shared_info xen_dummy_shared_info;
34001
34002-void *xen_initial_gdt;
34003-
34004 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
34005 __read_mostly int xen_have_vector_callback;
34006 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
34007@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
34008 {
34009 unsigned long va = dtr->address;
34010 unsigned int size = dtr->size + 1;
34011- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34012- unsigned long frames[pages];
34013+ unsigned long frames[65536 / PAGE_SIZE];
34014 int f;
34015
34016 /*
34017@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34018 {
34019 unsigned long va = dtr->address;
34020 unsigned int size = dtr->size + 1;
34021- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34022- unsigned long frames[pages];
34023+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
34024 int f;
34025
34026 /*
34027@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34028 * 8-byte entries, or 16 4k pages..
34029 */
34030
34031- BUG_ON(size > 65536);
34032+ BUG_ON(size > GDT_SIZE);
34033 BUG_ON(va & ~PAGE_MASK);
34034
34035 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
34036@@ -985,7 +981,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
34037 return 0;
34038 }
34039
34040-static void set_xen_basic_apic_ops(void)
34041+static void __init set_xen_basic_apic_ops(void)
34042 {
34043 apic->read = xen_apic_read;
34044 apic->write = xen_apic_write;
34045@@ -1290,30 +1286,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
34046 #endif
34047 };
34048
34049-static void xen_reboot(int reason)
34050+static __noreturn void xen_reboot(int reason)
34051 {
34052 struct sched_shutdown r = { .reason = reason };
34053
34054- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
34055- BUG();
34056+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
34057+ BUG();
34058 }
34059
34060-static void xen_restart(char *msg)
34061+static __noreturn void xen_restart(char *msg)
34062 {
34063 xen_reboot(SHUTDOWN_reboot);
34064 }
34065
34066-static void xen_emergency_restart(void)
34067+static __noreturn void xen_emergency_restart(void)
34068 {
34069 xen_reboot(SHUTDOWN_reboot);
34070 }
34071
34072-static void xen_machine_halt(void)
34073+static __noreturn void xen_machine_halt(void)
34074 {
34075 xen_reboot(SHUTDOWN_poweroff);
34076 }
34077
34078-static void xen_machine_power_off(void)
34079+static __noreturn void xen_machine_power_off(void)
34080 {
34081 if (pm_power_off)
34082 pm_power_off();
34083@@ -1464,7 +1460,17 @@ asmlinkage void __init xen_start_kernel(void)
34084 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
34085
34086 /* Work out if we support NX */
34087- x86_configure_nx();
34088+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34089+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
34090+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
34091+ unsigned l, h;
34092+
34093+ __supported_pte_mask |= _PAGE_NX;
34094+ rdmsr(MSR_EFER, l, h);
34095+ l |= EFER_NX;
34096+ wrmsr(MSR_EFER, l, h);
34097+ }
34098+#endif
34099
34100 xen_setup_features();
34101
34102@@ -1495,13 +1501,6 @@ asmlinkage void __init xen_start_kernel(void)
34103
34104 machine_ops = xen_machine_ops;
34105
34106- /*
34107- * The only reliable way to retain the initial address of the
34108- * percpu gdt_page is to remember it here, so we can go and
34109- * mark it RW later, when the initial percpu area is freed.
34110- */
34111- xen_initial_gdt = &per_cpu(gdt_page, 0);
34112-
34113 xen_smp_init();
34114
34115 #ifdef CONFIG_ACPI_NUMA
34116@@ -1700,7 +1699,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
34117 return NOTIFY_OK;
34118 }
34119
34120-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
34121+static struct notifier_block xen_hvm_cpu_notifier = {
34122 .notifier_call = xen_hvm_cpu_notify,
34123 };
34124
34125diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
34126index fdc3ba2..3daee39 100644
34127--- a/arch/x86/xen/mmu.c
34128+++ b/arch/x86/xen/mmu.c
34129@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34130 /* L3_k[510] -> level2_kernel_pgt
34131 * L3_i[511] -> level2_fixmap_pgt */
34132 convert_pfn_mfn(level3_kernel_pgt);
34133+ convert_pfn_mfn(level3_vmalloc_start_pgt);
34134+ convert_pfn_mfn(level3_vmalloc_end_pgt);
34135+ convert_pfn_mfn(level3_vmemmap_pgt);
34136
34137 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
34138 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
34139@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34140 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
34141 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
34142 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
34143+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
34144+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
34145+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
34146 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
34147 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
34148+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
34149 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
34150 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
34151
34152@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
34153 pv_mmu_ops.set_pud = xen_set_pud;
34154 #if PAGETABLE_LEVELS == 4
34155 pv_mmu_ops.set_pgd = xen_set_pgd;
34156+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
34157 #endif
34158
34159 /* This will work as long as patching hasn't happened yet
34160@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
34161 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
34162 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
34163 .set_pgd = xen_set_pgd_hyper,
34164+ .set_pgd_batched = xen_set_pgd_hyper,
34165
34166 .alloc_pud = xen_alloc_pmd_init,
34167 .release_pud = xen_release_pmd_init,
34168diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
e2b79cd1 34169index a1e58e1..9392ad8 100644
bb5f0bf8
AF
34170--- a/arch/x86/xen/smp.c
34171+++ b/arch/x86/xen/smp.c
34172@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
34173 {
34174 BUG_ON(smp_processor_id() != 0);
34175 native_smp_prepare_boot_cpu();
34176-
34177- /* We've switched to the "real" per-cpu gdt, so make sure the
34178- old memory can be recycled */
34179- make_lowmem_page_readwrite(xen_initial_gdt);
34180-
34181 xen_filter_cpu_maps();
34182 xen_setup_vcpu_info_placement();
34183 }
34184@@ -314,7 +309,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34185 ctxt->user_regs.ss = __KERNEL_DS;
34186 #ifdef CONFIG_X86_32
34187 ctxt->user_regs.fs = __KERNEL_PERCPU;
34188- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
34189+ savesegment(gs, ctxt->user_regs.gs);
34190 #else
34191 ctxt->gs_base_kernel = per_cpu_offset(cpu);
34192 #endif
34193@@ -324,8 +319,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34194
34195 {
34196 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
34197- ctxt->user_regs.ds = __USER_DS;
34198- ctxt->user_regs.es = __USER_DS;
34199+ ctxt->user_regs.ds = __KERNEL_DS;
34200+ ctxt->user_regs.es = __KERNEL_DS;
34201
34202 xen_copy_trap_info(ctxt->trap_ctxt);
34203
34204@@ -370,13 +365,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
34205 int rc;
34206
34207 per_cpu(current_task, cpu) = idle;
34208+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
34209 #ifdef CONFIG_X86_32
34210 irq_ctx_init(cpu);
34211 #else
34212 clear_tsk_thread_flag(idle, TIF_FORK);
34213- per_cpu(kernel_stack, cpu) =
34214- (unsigned long)task_stack_page(idle) -
34215- KERNEL_STACK_OFFSET + THREAD_SIZE;
34216+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
34217 #endif
34218 xen_setup_runstate_info(cpu);
34219 xen_setup_timer(cpu);
34220@@ -651,7 +645,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
34221
34222 void __init xen_smp_init(void)
34223 {
34224- smp_ops = xen_smp_ops;
34225+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
34226 xen_fill_possible_map();
34227 xen_init_spinlocks();
34228 }
34229diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
34230index 33ca6e4..0ded929 100644
34231--- a/arch/x86/xen/xen-asm_32.S
34232+++ b/arch/x86/xen/xen-asm_32.S
34233@@ -84,14 +84,14 @@ ENTRY(xen_iret)
34234 ESP_OFFSET=4 # bytes pushed onto stack
34235
34236 /*
34237- * Store vcpu_info pointer for easy access. Do it this way to
34238- * avoid having to reload %fs
34239+ * Store vcpu_info pointer for easy access.
34240 */
34241 #ifdef CONFIG_SMP
34242- GET_THREAD_INFO(%eax)
34243- movl %ss:TI_cpu(%eax), %eax
34244- movl %ss:__per_cpu_offset(,%eax,4), %eax
34245- mov %ss:xen_vcpu(%eax), %eax
34246+ push %fs
34247+ mov $(__KERNEL_PERCPU), %eax
34248+ mov %eax, %fs
34249+ mov PER_CPU_VAR(xen_vcpu), %eax
34250+ pop %fs
34251 #else
34252 movl %ss:xen_vcpu, %eax
34253 #endif
34254diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
34255index 7faed58..ba4427c 100644
34256--- a/arch/x86/xen/xen-head.S
34257+++ b/arch/x86/xen/xen-head.S
34258@@ -19,6 +19,17 @@ ENTRY(startup_xen)
34259 #ifdef CONFIG_X86_32
34260 mov %esi,xen_start_info
34261 mov $init_thread_union+THREAD_SIZE,%esp
34262+#ifdef CONFIG_SMP
34263+ movl $cpu_gdt_table,%edi
34264+ movl $__per_cpu_load,%eax
34265+ movw %ax,__KERNEL_PERCPU + 2(%edi)
34266+ rorl $16,%eax
34267+ movb %al,__KERNEL_PERCPU + 4(%edi)
34268+ movb %ah,__KERNEL_PERCPU + 7(%edi)
34269+ movl $__per_cpu_end - 1,%eax
34270+ subl $__per_cpu_start,%eax
34271+ movw %ax,__KERNEL_PERCPU + 0(%edi)
34272+#endif
34273 #else
34274 mov %rsi,xen_start_info
34275 mov $init_thread_union+THREAD_SIZE,%rsp
34276diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
34277index a95b417..b6dbd0b 100644
34278--- a/arch/x86/xen/xen-ops.h
34279+++ b/arch/x86/xen/xen-ops.h
34280@@ -10,8 +10,6 @@
34281 extern const char xen_hypervisor_callback[];
34282 extern const char xen_failsafe_callback[];
34283
34284-extern void *xen_initial_gdt;
34285-
34286 struct trap_info;
34287 void xen_copy_trap_info(struct trap_info *traps);
34288
34289diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
34290index 525bd3d..ef888b1 100644
34291--- a/arch/xtensa/variants/dc232b/include/variant/core.h
34292+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
34293@@ -119,9 +119,9 @@
34294 ----------------------------------------------------------------------*/
34295
34296 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
34297-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
34298 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
34299 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
34300+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34301
34302 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
34303 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
34304diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
34305index 2f33760..835e50a 100644
34306--- a/arch/xtensa/variants/fsf/include/variant/core.h
34307+++ b/arch/xtensa/variants/fsf/include/variant/core.h
34308@@ -11,6 +11,7 @@
34309 #ifndef _XTENSA_CORE_H
34310 #define _XTENSA_CORE_H
34311
34312+#include <linux/const.h>
34313
34314 /****************************************************************************
34315 Parameters Useful for Any Code, USER or PRIVILEGED
34316@@ -112,9 +113,9 @@
34317 ----------------------------------------------------------------------*/
34318
34319 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34320-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34321 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34322 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34323+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34324
34325 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
34326 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
34327diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
34328index af00795..2bb8105 100644
34329--- a/arch/xtensa/variants/s6000/include/variant/core.h
34330+++ b/arch/xtensa/variants/s6000/include/variant/core.h
34331@@ -11,6 +11,7 @@
34332 #ifndef _XTENSA_CORE_CONFIGURATION_H
34333 #define _XTENSA_CORE_CONFIGURATION_H
34334
34335+#include <linux/const.h>
34336
34337 /****************************************************************************
34338 Parameters Useful for Any Code, USER or PRIVILEGED
34339@@ -118,9 +119,9 @@
34340 ----------------------------------------------------------------------*/
34341
34342 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34343-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34344 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34345 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34346+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34347
34348 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
34349 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
34350diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
34351index 58916af..eb9dbcf6 100644
34352--- a/block/blk-iopoll.c
34353+++ b/block/blk-iopoll.c
34354@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
34355 }
34356 EXPORT_SYMBOL(blk_iopoll_complete);
34357
34358-static void blk_iopoll_softirq(struct softirq_action *h)
34359+static void blk_iopoll_softirq(void)
34360 {
34361 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
34362 int rearm = 0, budget = blk_iopoll_budget;
34363@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
34364 return NOTIFY_OK;
34365 }
34366
34367-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
34368+static struct notifier_block blk_iopoll_cpu_notifier = {
34369 .notifier_call = blk_iopoll_cpu_notify,
34370 };
34371
34372diff --git a/block/blk-map.c b/block/blk-map.c
34373index 623e1cd..ca1e109 100644
34374--- a/block/blk-map.c
34375+++ b/block/blk-map.c
34376@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
34377 if (!len || !kbuf)
34378 return -EINVAL;
34379
34380- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
34381+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
34382 if (do_copy)
34383 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
34384 else
34385diff --git a/block/blk-softirq.c b/block/blk-softirq.c
34386index 467c8de..f3628c5 100644
34387--- a/block/blk-softirq.c
34388+++ b/block/blk-softirq.c
34389@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
34390 * Softirq action handler - move entries to local list and loop over them
34391 * while passing them to the queue registered handler.
34392 */
34393-static void blk_done_softirq(struct softirq_action *h)
34394+static void blk_done_softirq(void)
34395 {
34396 struct list_head *cpu_list, local_list;
34397
34398@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
34399 return NOTIFY_OK;
34400 }
34401
34402-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
34403+static struct notifier_block blk_cpu_notifier = {
34404 .notifier_call = blk_cpu_notify,
34405 };
34406
34407diff --git a/block/bsg.c b/block/bsg.c
34408index 420a5a9..23834aa 100644
34409--- a/block/bsg.c
34410+++ b/block/bsg.c
34411@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
34412 struct sg_io_v4 *hdr, struct bsg_device *bd,
34413 fmode_t has_write_perm)
34414 {
34415+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34416+ unsigned char *cmdptr;
34417+
34418 if (hdr->request_len > BLK_MAX_CDB) {
34419 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
34420 if (!rq->cmd)
34421 return -ENOMEM;
34422- }
34423+ cmdptr = rq->cmd;
34424+ } else
34425+ cmdptr = tmpcmd;
34426
34427- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
34428+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
34429 hdr->request_len))
34430 return -EFAULT;
34431
34432+ if (cmdptr != rq->cmd)
34433+ memcpy(rq->cmd, cmdptr, hdr->request_len);
34434+
34435 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
34436 if (blk_verify_command(rq->cmd, has_write_perm))
34437 return -EPERM;
34438diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
34439index 7c668c8..db3521c 100644
34440--- a/block/compat_ioctl.c
34441+++ b/block/compat_ioctl.c
34442@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
34443 err |= __get_user(f->spec1, &uf->spec1);
34444 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
34445 err |= __get_user(name, &uf->name);
34446- f->name = compat_ptr(name);
34447+ f->name = (void __force_kernel *)compat_ptr(name);
34448 if (err) {
34449 err = -EFAULT;
34450 goto out;
34451diff --git a/block/genhd.c b/block/genhd.c
34452index cdeb527..10aa34db 100644
34453--- a/block/genhd.c
34454+++ b/block/genhd.c
34455@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
34456
34457 /*
34458 * Register device numbers dev..(dev+range-1)
34459- * range must be nonzero
34460+ * Noop if @range is zero.
34461 * The hash chain is sorted on range, so that subranges can override.
34462 */
34463 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
34464 struct kobject *(*probe)(dev_t, int *, void *),
34465 int (*lock)(dev_t, void *), void *data)
34466 {
34467- kobj_map(bdev_map, devt, range, module, probe, lock, data);
34468+ if (range)
34469+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
34470 }
34471
34472 EXPORT_SYMBOL(blk_register_region);
34473
34474+/* undo blk_register_region(), noop if @range is zero */
34475 void blk_unregister_region(dev_t devt, unsigned long range)
34476 {
34477- kobj_unmap(bdev_map, devt, range);
34478+ if (range)
34479+ kobj_unmap(bdev_map, devt, range);
34480 }
34481
34482 EXPORT_SYMBOL(blk_unregister_region);
34483diff --git a/block/partitions/efi.c b/block/partitions/efi.c
34484index c85fc89..51e690b 100644
34485--- a/block/partitions/efi.c
34486+++ b/block/partitions/efi.c
34487@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
34488 if (!gpt)
34489 return NULL;
34490
34491+ if (!le32_to_cpu(gpt->num_partition_entries))
34492+ return NULL;
34493+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
34494+ if (!pte)
34495+ return NULL;
34496+
34497 count = le32_to_cpu(gpt->num_partition_entries) *
34498 le32_to_cpu(gpt->sizeof_partition_entry);
34499- if (!count)
34500- return NULL;
34501- pte = kmalloc(count, GFP_KERNEL);
34502- if (!pte)
34503- return NULL;
34504-
34505 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
34506 (u8 *) pte,
34507 count) < count) {
34508diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
34509index a5ffcc9..3cedc9c 100644
34510--- a/block/scsi_ioctl.c
34511+++ b/block/scsi_ioctl.c
34512@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
34513 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
34514 struct sg_io_hdr *hdr, fmode_t mode)
34515 {
34516- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
34517+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34518+ unsigned char *cmdptr;
34519+
34520+ if (rq->cmd != rq->__cmd)
34521+ cmdptr = rq->cmd;
34522+ else
34523+ cmdptr = tmpcmd;
34524+
34525+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
34526 return -EFAULT;
34527+
34528+ if (cmdptr != rq->cmd)
34529+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
34530+
34531 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
34532 return -EPERM;
34533
34534@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
34535 int err;
34536 unsigned int in_len, out_len, bytes, opcode, cmdlen;
34537 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
34538+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34539+ unsigned char *cmdptr;
34540
34541 if (!sic)
34542 return -EINVAL;
34543@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
34544 */
34545 err = -EFAULT;
34546 rq->cmd_len = cmdlen;
34547- if (copy_from_user(rq->cmd, sic->data, cmdlen))
34548+
34549+ if (rq->cmd != rq->__cmd)
34550+ cmdptr = rq->cmd;
34551+ else
34552+ cmdptr = tmpcmd;
34553+
34554+ if (copy_from_user(cmdptr, sic->data, cmdlen))
34555 goto error;
34556
34557+ if (rq->cmd != cmdptr)
34558+ memcpy(rq->cmd, cmdptr, cmdlen);
34559+
34560 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
34561 goto error;
34562
34563diff --git a/crypto/cryptd.c b/crypto/cryptd.c
34564index 7bdd61b..afec999 100644
34565--- a/crypto/cryptd.c
34566+++ b/crypto/cryptd.c
34567@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
34568
34569 struct cryptd_blkcipher_request_ctx {
34570 crypto_completion_t complete;
34571-};
34572+} __no_const;
34573
34574 struct cryptd_hash_ctx {
34575 struct crypto_shash *child;
34576@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
34577
34578 struct cryptd_aead_request_ctx {
34579 crypto_completion_t complete;
34580-};
34581+} __no_const;
34582
34583 static void cryptd_queue_worker(struct work_struct *work);
34584
34585diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
34586index b2c99dc..476c9fb 100644
34587--- a/crypto/pcrypt.c
34588+++ b/crypto/pcrypt.c
34589@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
34590 int ret;
34591
34592 pinst->kobj.kset = pcrypt_kset;
34593- ret = kobject_add(&pinst->kobj, NULL, name);
34594+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
34595 if (!ret)
34596 kobject_uevent(&pinst->kobj, KOBJ_ADD);
34597
34598@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
34599
34600 get_online_cpus();
34601
34602- pcrypt->wq = alloc_workqueue(name,
34603- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
34604+ pcrypt->wq = alloc_workqueue("%s",
34605+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
34606 if (!pcrypt->wq)
34607 goto err;
34608
34609diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
34610index f220d64..d359ad6 100644
34611--- a/drivers/acpi/apei/apei-internal.h
34612+++ b/drivers/acpi/apei/apei-internal.h
34613@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
34614 struct apei_exec_ins_type {
34615 u32 flags;
34616 apei_exec_ins_func_t run;
34617-};
34618+} __do_const;
34619
34620 struct apei_exec_context {
34621 u32 ip;
34622diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
34623index 33dc6a0..4b24b47 100644
34624--- a/drivers/acpi/apei/cper.c
34625+++ b/drivers/acpi/apei/cper.c
34626@@ -39,12 +39,12 @@
34627 */
34628 u64 cper_next_record_id(void)
34629 {
34630- static atomic64_t seq;
34631+ static atomic64_unchecked_t seq;
34632
34633- if (!atomic64_read(&seq))
34634- atomic64_set(&seq, ((u64)get_seconds()) << 32);
34635+ if (!atomic64_read_unchecked(&seq))
34636+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
34637
34638- return atomic64_inc_return(&seq);
34639+ return atomic64_inc_return_unchecked(&seq);
34640 }
34641 EXPORT_SYMBOL_GPL(cper_next_record_id);
34642
34643diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
34644index be60399..778b33e8 100644
34645--- a/drivers/acpi/bgrt.c
34646+++ b/drivers/acpi/bgrt.c
34647@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
34648 return -ENODEV;
34649
34650 sysfs_bin_attr_init(&image_attr);
34651- image_attr.private = bgrt_image;
34652- image_attr.size = bgrt_image_size;
34653+ pax_open_kernel();
34654+ *(void **)&image_attr.private = bgrt_image;
34655+ *(size_t *)&image_attr.size = bgrt_image_size;
34656+ pax_close_kernel();
34657
34658 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
34659 if (!bgrt_kobj)
34660diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
34661index cb96296..b81293b 100644
34662--- a/drivers/acpi/blacklist.c
34663+++ b/drivers/acpi/blacklist.c
34664@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
34665 u32 is_critical_error;
34666 };
34667
34668-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
34669+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
34670
34671 /*
34672 * POLICY: If *anything* doesn't work, put it on the blacklist.
34673@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
34674 return 0;
34675 }
34676
34677-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
34678+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
34679 {
34680 .callback = dmi_disable_osi_vista,
34681 .ident = "Fujitsu Siemens",
34682diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
34683index 7586544..636a2f0 100644
34684--- a/drivers/acpi/ec_sys.c
34685+++ b/drivers/acpi/ec_sys.c
34686@@ -12,6 +12,7 @@
34687 #include <linux/acpi.h>
34688 #include <linux/debugfs.h>
34689 #include <linux/module.h>
34690+#include <linux/uaccess.h>
34691 #include "internal.h"
34692
34693 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
34694@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
34695 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
34696 */
34697 unsigned int size = EC_SPACE_SIZE;
34698- u8 *data = (u8 *) buf;
34699+ u8 data;
34700 loff_t init_off = *off;
34701 int err = 0;
34702
34703@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
34704 size = count;
34705
34706 while (size) {
34707- err = ec_read(*off, &data[*off - init_off]);
34708+ err = ec_read(*off, &data);
34709 if (err)
34710 return err;
34711+ if (put_user(data, &buf[*off - init_off]))
34712+ return -EFAULT;
34713 *off += 1;
34714 size--;
34715 }
34716@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
34717
34718 unsigned int size = count;
34719 loff_t init_off = *off;
34720- u8 *data = (u8 *) buf;
34721 int err = 0;
34722
34723 if (*off >= EC_SPACE_SIZE)
34724@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
34725 }
34726
34727 while (size) {
34728- u8 byte_write = data[*off - init_off];
34729+ u8 byte_write;
34730+ if (get_user(byte_write, &buf[*off - init_off]))
34731+ return -EFAULT;
34732 err = ec_write(*off, byte_write);
34733 if (err)
34734 return err;
34735diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
34736index eb133c7..f571552 100644
34737--- a/drivers/acpi/processor_idle.c
34738+++ b/drivers/acpi/processor_idle.c
34739@@ -994,7 +994,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
34740 {
34741 int i, count = CPUIDLE_DRIVER_STATE_START;
34742 struct acpi_processor_cx *cx;
34743- struct cpuidle_state *state;
34744+ cpuidle_state_no_const *state;
34745 struct cpuidle_driver *drv = &acpi_idle_driver;
34746
34747 if (!pr->flags.power_setup_done)
34748diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
34749index fcae5fa..e9f71ea 100644
34750--- a/drivers/acpi/sysfs.c
34751+++ b/drivers/acpi/sysfs.c
34752@@ -423,11 +423,11 @@ static u32 num_counters;
34753 static struct attribute **all_attrs;
34754 static u32 acpi_gpe_count;
34755
34756-static struct attribute_group interrupt_stats_attr_group = {
34757+static attribute_group_no_const interrupt_stats_attr_group = {
34758 .name = "interrupts",
34759 };
34760
34761-static struct kobj_attribute *counter_attrs;
34762+static kobj_attribute_no_const *counter_attrs;
34763
34764 static void delete_gpe_attr_array(void)
34765 {
34766diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
34767index 7b9bdd8..37638ca 100644
34768--- a/drivers/ata/libahci.c
34769+++ b/drivers/ata/libahci.c
34770@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
34771 }
34772 EXPORT_SYMBOL_GPL(ahci_kick_engine);
34773
34774-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
34775+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
34776 struct ata_taskfile *tf, int is_cmd, u16 flags,
34777 unsigned long timeout_msec)
34778 {
34779diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
34780index adf002a..39bb8f9 100644
34781--- a/drivers/ata/libata-core.c
34782+++ b/drivers/ata/libata-core.c
34783@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
34784 struct ata_port *ap;
34785 unsigned int tag;
34786
34787- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34788+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34789 ap = qc->ap;
34790
34791 qc->flags = 0;
34792@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
34793 struct ata_port *ap;
34794 struct ata_link *link;
34795
34796- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34797+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34798 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
34799 ap = qc->ap;
34800 link = qc->dev->link;
34801@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34802 return;
34803
34804 spin_lock(&lock);
34805+ pax_open_kernel();
34806
34807 for (cur = ops->inherits; cur; cur = cur->inherits) {
34808 void **inherit = (void **)cur;
34809@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34810 if (IS_ERR(*pp))
34811 *pp = NULL;
34812
34813- ops->inherits = NULL;
34814+ *(struct ata_port_operations **)&ops->inherits = NULL;
34815
34816+ pax_close_kernel();
34817 spin_unlock(&lock);
34818 }
34819
34820diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
34821index 7638121..357a965 100644
34822--- a/drivers/ata/pata_arasan_cf.c
34823+++ b/drivers/ata/pata_arasan_cf.c
34824@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
34825 /* Handle platform specific quirks */
34826 if (quirk) {
34827 if (quirk & CF_BROKEN_PIO) {
34828- ap->ops->set_piomode = NULL;
34829+ pax_open_kernel();
34830+ *(void **)&ap->ops->set_piomode = NULL;
34831+ pax_close_kernel();
34832 ap->pio_mask = 0;
34833 }
34834 if (quirk & CF_BROKEN_MWDMA)
34835diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
34836index f9b983a..887b9d8 100644
34837--- a/drivers/atm/adummy.c
34838+++ b/drivers/atm/adummy.c
34839@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
34840 vcc->pop(vcc, skb);
34841 else
34842 dev_kfree_skb_any(skb);
34843- atomic_inc(&vcc->stats->tx);
34844+ atomic_inc_unchecked(&vcc->stats->tx);
34845
34846 return 0;
34847 }
34848diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
34849index 77a7480d..05cde58 100644
34850--- a/drivers/atm/ambassador.c
34851+++ b/drivers/atm/ambassador.c
34852@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
34853 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
34854
34855 // VC layer stats
34856- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
34857+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
34858
34859 // free the descriptor
34860 kfree (tx_descr);
34861@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
34862 dump_skb ("<<<", vc, skb);
34863
34864 // VC layer stats
34865- atomic_inc(&atm_vcc->stats->rx);
34866+ atomic_inc_unchecked(&atm_vcc->stats->rx);
34867 __net_timestamp(skb);
34868 // end of our responsibility
34869 atm_vcc->push (atm_vcc, skb);
34870@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
34871 } else {
34872 PRINTK (KERN_INFO, "dropped over-size frame");
34873 // should we count this?
34874- atomic_inc(&atm_vcc->stats->rx_drop);
34875+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
34876 }
34877
34878 } else {
34879@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
34880 }
34881
34882 if (check_area (skb->data, skb->len)) {
34883- atomic_inc(&atm_vcc->stats->tx_err);
34884+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
34885 return -ENOMEM; // ?
34886 }
34887
34888diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
34889index 0e3f8f9..765a7a5 100644
34890--- a/drivers/atm/atmtcp.c
34891+++ b/drivers/atm/atmtcp.c
34892@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34893 if (vcc->pop) vcc->pop(vcc,skb);
34894 else dev_kfree_skb(skb);
34895 if (dev_data) return 0;
34896- atomic_inc(&vcc->stats->tx_err);
34897+ atomic_inc_unchecked(&vcc->stats->tx_err);
34898 return -ENOLINK;
34899 }
34900 size = skb->len+sizeof(struct atmtcp_hdr);
34901@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34902 if (!new_skb) {
34903 if (vcc->pop) vcc->pop(vcc,skb);
34904 else dev_kfree_skb(skb);
34905- atomic_inc(&vcc->stats->tx_err);
34906+ atomic_inc_unchecked(&vcc->stats->tx_err);
34907 return -ENOBUFS;
34908 }
34909 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
34910@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34911 if (vcc->pop) vcc->pop(vcc,skb);
34912 else dev_kfree_skb(skb);
34913 out_vcc->push(out_vcc,new_skb);
34914- atomic_inc(&vcc->stats->tx);
34915- atomic_inc(&out_vcc->stats->rx);
34916+ atomic_inc_unchecked(&vcc->stats->tx);
34917+ atomic_inc_unchecked(&out_vcc->stats->rx);
34918 return 0;
34919 }
34920
34921@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
34922 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
34923 read_unlock(&vcc_sklist_lock);
34924 if (!out_vcc) {
34925- atomic_inc(&vcc->stats->tx_err);
34926+ atomic_inc_unchecked(&vcc->stats->tx_err);
34927 goto done;
34928 }
34929 skb_pull(skb,sizeof(struct atmtcp_hdr));
34930@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
34931 __net_timestamp(new_skb);
34932 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
34933 out_vcc->push(out_vcc,new_skb);
34934- atomic_inc(&vcc->stats->tx);
34935- atomic_inc(&out_vcc->stats->rx);
34936+ atomic_inc_unchecked(&vcc->stats->tx);
34937+ atomic_inc_unchecked(&out_vcc->stats->rx);
34938 done:
34939 if (vcc->pop) vcc->pop(vcc,skb);
34940 else dev_kfree_skb(skb);
34941diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
34942index b1955ba..b179940 100644
34943--- a/drivers/atm/eni.c
34944+++ b/drivers/atm/eni.c
34945@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
34946 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
34947 vcc->dev->number);
34948 length = 0;
34949- atomic_inc(&vcc->stats->rx_err);
34950+ atomic_inc_unchecked(&vcc->stats->rx_err);
34951 }
34952 else {
34953 length = ATM_CELL_SIZE-1; /* no HEC */
34954@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
34955 size);
34956 }
34957 eff = length = 0;
34958- atomic_inc(&vcc->stats->rx_err);
34959+ atomic_inc_unchecked(&vcc->stats->rx_err);
34960 }
34961 else {
34962 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
34963@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
34964 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
34965 vcc->dev->number,vcc->vci,length,size << 2,descr);
34966 length = eff = 0;
34967- atomic_inc(&vcc->stats->rx_err);
34968+ atomic_inc_unchecked(&vcc->stats->rx_err);
34969 }
34970 }
34971 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
34972@@ -767,7 +767,7 @@ rx_dequeued++;
34973 vcc->push(vcc,skb);
34974 pushed++;
34975 }
34976- atomic_inc(&vcc->stats->rx);
34977+ atomic_inc_unchecked(&vcc->stats->rx);
34978 }
34979 wake_up(&eni_dev->rx_wait);
34980 }
34981@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
34982 PCI_DMA_TODEVICE);
34983 if (vcc->pop) vcc->pop(vcc,skb);
34984 else dev_kfree_skb_irq(skb);
34985- atomic_inc(&vcc->stats->tx);
34986+ atomic_inc_unchecked(&vcc->stats->tx);
34987 wake_up(&eni_dev->tx_wait);
34988 dma_complete++;
34989 }
34990diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
34991index b41c948..a002b17 100644
34992--- a/drivers/atm/firestream.c
34993+++ b/drivers/atm/firestream.c
34994@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
34995 }
34996 }
34997
34998- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
34999+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35000
35001 fs_dprintk (FS_DEBUG_TXMEM, "i");
35002 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
35003@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35004 #endif
35005 skb_put (skb, qe->p1 & 0xffff);
35006 ATM_SKB(skb)->vcc = atm_vcc;
35007- atomic_inc(&atm_vcc->stats->rx);
35008+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35009 __net_timestamp(skb);
35010 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
35011 atm_vcc->push (atm_vcc, skb);
35012@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35013 kfree (pe);
35014 }
35015 if (atm_vcc)
35016- atomic_inc(&atm_vcc->stats->rx_drop);
35017+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35018 break;
35019 case 0x1f: /* Reassembly abort: no buffers. */
35020 /* Silently increment error counter. */
35021 if (atm_vcc)
35022- atomic_inc(&atm_vcc->stats->rx_drop);
35023+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35024 break;
35025 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
35026 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
35027diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
35028index 204814e..cede831 100644
35029--- a/drivers/atm/fore200e.c
35030+++ b/drivers/atm/fore200e.c
35031@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
35032 #endif
35033 /* check error condition */
35034 if (*entry->status & STATUS_ERROR)
35035- atomic_inc(&vcc->stats->tx_err);
35036+ atomic_inc_unchecked(&vcc->stats->tx_err);
35037 else
35038- atomic_inc(&vcc->stats->tx);
35039+ atomic_inc_unchecked(&vcc->stats->tx);
35040 }
35041 }
35042
35043@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35044 if (skb == NULL) {
35045 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
35046
35047- atomic_inc(&vcc->stats->rx_drop);
35048+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35049 return -ENOMEM;
35050 }
35051
35052@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35053
35054 dev_kfree_skb_any(skb);
35055
35056- atomic_inc(&vcc->stats->rx_drop);
35057+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35058 return -ENOMEM;
35059 }
35060
35061 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35062
35063 vcc->push(vcc, skb);
35064- atomic_inc(&vcc->stats->rx);
35065+ atomic_inc_unchecked(&vcc->stats->rx);
35066
35067 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35068
35069@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
35070 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
35071 fore200e->atm_dev->number,
35072 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
35073- atomic_inc(&vcc->stats->rx_err);
35074+ atomic_inc_unchecked(&vcc->stats->rx_err);
35075 }
35076 }
35077
35078@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
35079 goto retry_here;
35080 }
35081
35082- atomic_inc(&vcc->stats->tx_err);
35083+ atomic_inc_unchecked(&vcc->stats->tx_err);
35084
35085 fore200e->tx_sat++;
35086 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
35087diff --git a/drivers/atm/he.c b/drivers/atm/he.c
35088index 507362a..a845e57 100644
35089--- a/drivers/atm/he.c
35090+++ b/drivers/atm/he.c
35091@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35092
35093 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
35094 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
35095- atomic_inc(&vcc->stats->rx_drop);
35096+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35097 goto return_host_buffers;
35098 }
35099
35100@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35101 RBRQ_LEN_ERR(he_dev->rbrq_head)
35102 ? "LEN_ERR" : "",
35103 vcc->vpi, vcc->vci);
35104- atomic_inc(&vcc->stats->rx_err);
35105+ atomic_inc_unchecked(&vcc->stats->rx_err);
35106 goto return_host_buffers;
35107 }
35108
35109@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35110 vcc->push(vcc, skb);
35111 spin_lock(&he_dev->global_lock);
35112
35113- atomic_inc(&vcc->stats->rx);
35114+ atomic_inc_unchecked(&vcc->stats->rx);
35115
35116 return_host_buffers:
35117 ++pdus_assembled;
35118@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
35119 tpd->vcc->pop(tpd->vcc, tpd->skb);
35120 else
35121 dev_kfree_skb_any(tpd->skb);
35122- atomic_inc(&tpd->vcc->stats->tx_err);
35123+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
35124 }
35125 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
35126 return;
35127@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35128 vcc->pop(vcc, skb);
35129 else
35130 dev_kfree_skb_any(skb);
35131- atomic_inc(&vcc->stats->tx_err);
35132+ atomic_inc_unchecked(&vcc->stats->tx_err);
35133 return -EINVAL;
35134 }
35135
35136@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35137 vcc->pop(vcc, skb);
35138 else
35139 dev_kfree_skb_any(skb);
35140- atomic_inc(&vcc->stats->tx_err);
35141+ atomic_inc_unchecked(&vcc->stats->tx_err);
35142 return -EINVAL;
35143 }
35144 #endif
35145@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35146 vcc->pop(vcc, skb);
35147 else
35148 dev_kfree_skb_any(skb);
35149- atomic_inc(&vcc->stats->tx_err);
35150+ atomic_inc_unchecked(&vcc->stats->tx_err);
35151 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35152 return -ENOMEM;
35153 }
35154@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35155 vcc->pop(vcc, skb);
35156 else
35157 dev_kfree_skb_any(skb);
35158- atomic_inc(&vcc->stats->tx_err);
35159+ atomic_inc_unchecked(&vcc->stats->tx_err);
35160 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35161 return -ENOMEM;
35162 }
35163@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35164 __enqueue_tpd(he_dev, tpd, cid);
35165 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35166
35167- atomic_inc(&vcc->stats->tx);
35168+ atomic_inc_unchecked(&vcc->stats->tx);
35169
35170 return 0;
35171 }
35172diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
35173index 1dc0519..1aadaf7 100644
35174--- a/drivers/atm/horizon.c
35175+++ b/drivers/atm/horizon.c
35176@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
35177 {
35178 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
35179 // VC layer stats
35180- atomic_inc(&vcc->stats->rx);
35181+ atomic_inc_unchecked(&vcc->stats->rx);
35182 __net_timestamp(skb);
35183 // end of our responsibility
35184 vcc->push (vcc, skb);
35185@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
35186 dev->tx_iovec = NULL;
35187
35188 // VC layer stats
35189- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35190+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35191
35192 // free the skb
35193 hrz_kfree_skb (skb);
35194diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
35195index 272f009..a18ba55 100644
35196--- a/drivers/atm/idt77252.c
35197+++ b/drivers/atm/idt77252.c
35198@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
35199 else
35200 dev_kfree_skb(skb);
35201
35202- atomic_inc(&vcc->stats->tx);
35203+ atomic_inc_unchecked(&vcc->stats->tx);
35204 }
35205
35206 atomic_dec(&scq->used);
35207@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35208 if ((sb = dev_alloc_skb(64)) == NULL) {
35209 printk("%s: Can't allocate buffers for aal0.\n",
35210 card->name);
35211- atomic_add(i, &vcc->stats->rx_drop);
35212+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
35213 break;
35214 }
35215 if (!atm_charge(vcc, sb->truesize)) {
35216 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
35217 card->name);
35218- atomic_add(i - 1, &vcc->stats->rx_drop);
35219+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
35220 dev_kfree_skb(sb);
35221 break;
35222 }
35223@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35224 ATM_SKB(sb)->vcc = vcc;
35225 __net_timestamp(sb);
35226 vcc->push(vcc, sb);
35227- atomic_inc(&vcc->stats->rx);
35228+ atomic_inc_unchecked(&vcc->stats->rx);
35229
35230 cell += ATM_CELL_PAYLOAD;
35231 }
35232@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35233 "(CDC: %08x)\n",
35234 card->name, len, rpp->len, readl(SAR_REG_CDC));
35235 recycle_rx_pool_skb(card, rpp);
35236- atomic_inc(&vcc->stats->rx_err);
35237+ atomic_inc_unchecked(&vcc->stats->rx_err);
35238 return;
35239 }
35240 if (stat & SAR_RSQE_CRC) {
35241 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
35242 recycle_rx_pool_skb(card, rpp);
35243- atomic_inc(&vcc->stats->rx_err);
35244+ atomic_inc_unchecked(&vcc->stats->rx_err);
35245 return;
35246 }
35247 if (skb_queue_len(&rpp->queue) > 1) {
35248@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35249 RXPRINTK("%s: Can't alloc RX skb.\n",
35250 card->name);
35251 recycle_rx_pool_skb(card, rpp);
35252- atomic_inc(&vcc->stats->rx_err);
35253+ atomic_inc_unchecked(&vcc->stats->rx_err);
35254 return;
35255 }
35256 if (!atm_charge(vcc, skb->truesize)) {
35257@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35258 __net_timestamp(skb);
35259
35260 vcc->push(vcc, skb);
35261- atomic_inc(&vcc->stats->rx);
35262+ atomic_inc_unchecked(&vcc->stats->rx);
35263
35264 return;
35265 }
35266@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35267 __net_timestamp(skb);
35268
35269 vcc->push(vcc, skb);
35270- atomic_inc(&vcc->stats->rx);
35271+ atomic_inc_unchecked(&vcc->stats->rx);
35272
35273 if (skb->truesize > SAR_FB_SIZE_3)
35274 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
35275@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
35276 if (vcc->qos.aal != ATM_AAL0) {
35277 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
35278 card->name, vpi, vci);
35279- atomic_inc(&vcc->stats->rx_drop);
35280+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35281 goto drop;
35282 }
35283
35284 if ((sb = dev_alloc_skb(64)) == NULL) {
35285 printk("%s: Can't allocate buffers for AAL0.\n",
35286 card->name);
35287- atomic_inc(&vcc->stats->rx_err);
35288+ atomic_inc_unchecked(&vcc->stats->rx_err);
35289 goto drop;
35290 }
35291
35292@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
35293 ATM_SKB(sb)->vcc = vcc;
35294 __net_timestamp(sb);
35295 vcc->push(vcc, sb);
35296- atomic_inc(&vcc->stats->rx);
35297+ atomic_inc_unchecked(&vcc->stats->rx);
35298
35299 drop:
35300 skb_pull(queue, 64);
35301@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35302
35303 if (vc == NULL) {
35304 printk("%s: NULL connection in send().\n", card->name);
35305- atomic_inc(&vcc->stats->tx_err);
35306+ atomic_inc_unchecked(&vcc->stats->tx_err);
35307 dev_kfree_skb(skb);
35308 return -EINVAL;
35309 }
35310 if (!test_bit(VCF_TX, &vc->flags)) {
35311 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
35312- atomic_inc(&vcc->stats->tx_err);
35313+ atomic_inc_unchecked(&vcc->stats->tx_err);
35314 dev_kfree_skb(skb);
35315 return -EINVAL;
35316 }
35317@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35318 break;
35319 default:
35320 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
35321- atomic_inc(&vcc->stats->tx_err);
35322+ atomic_inc_unchecked(&vcc->stats->tx_err);
35323 dev_kfree_skb(skb);
35324 return -EINVAL;
35325 }
35326
35327 if (skb_shinfo(skb)->nr_frags != 0) {
35328 printk("%s: No scatter-gather yet.\n", card->name);
35329- atomic_inc(&vcc->stats->tx_err);
35330+ atomic_inc_unchecked(&vcc->stats->tx_err);
35331 dev_kfree_skb(skb);
35332 return -EINVAL;
35333 }
35334@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35335
35336 err = queue_skb(card, vc, skb, oam);
35337 if (err) {
35338- atomic_inc(&vcc->stats->tx_err);
35339+ atomic_inc_unchecked(&vcc->stats->tx_err);
35340 dev_kfree_skb(skb);
35341 return err;
35342 }
35343@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
35344 skb = dev_alloc_skb(64);
35345 if (!skb) {
35346 printk("%s: Out of memory in send_oam().\n", card->name);
35347- atomic_inc(&vcc->stats->tx_err);
35348+ atomic_inc_unchecked(&vcc->stats->tx_err);
35349 return -ENOMEM;
35350 }
35351 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
35352diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
35353index 4217f29..88f547a 100644
35354--- a/drivers/atm/iphase.c
35355+++ b/drivers/atm/iphase.c
35356@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
35357 status = (u_short) (buf_desc_ptr->desc_mode);
35358 if (status & (RX_CER | RX_PTE | RX_OFL))
35359 {
35360- atomic_inc(&vcc->stats->rx_err);
35361+ atomic_inc_unchecked(&vcc->stats->rx_err);
35362 IF_ERR(printk("IA: bad packet, dropping it");)
35363 if (status & RX_CER) {
35364 IF_ERR(printk(" cause: packet CRC error\n");)
35365@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
35366 len = dma_addr - buf_addr;
35367 if (len > iadev->rx_buf_sz) {
35368 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
35369- atomic_inc(&vcc->stats->rx_err);
35370+ atomic_inc_unchecked(&vcc->stats->rx_err);
35371 goto out_free_desc;
35372 }
35373
35374@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35375 ia_vcc = INPH_IA_VCC(vcc);
35376 if (ia_vcc == NULL)
35377 {
35378- atomic_inc(&vcc->stats->rx_err);
35379+ atomic_inc_unchecked(&vcc->stats->rx_err);
35380 atm_return(vcc, skb->truesize);
35381 dev_kfree_skb_any(skb);
35382 goto INCR_DLE;
35383@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35384 if ((length > iadev->rx_buf_sz) || (length >
35385 (skb->len - sizeof(struct cpcs_trailer))))
35386 {
35387- atomic_inc(&vcc->stats->rx_err);
35388+ atomic_inc_unchecked(&vcc->stats->rx_err);
35389 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
35390 length, skb->len);)
35391 atm_return(vcc, skb->truesize);
35392@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35393
35394 IF_RX(printk("rx_dle_intr: skb push");)
35395 vcc->push(vcc,skb);
35396- atomic_inc(&vcc->stats->rx);
35397+ atomic_inc_unchecked(&vcc->stats->rx);
35398 iadev->rx_pkt_cnt++;
35399 }
35400 INCR_DLE:
35401@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
35402 {
35403 struct k_sonet_stats *stats;
35404 stats = &PRIV(_ia_dev[board])->sonet_stats;
35405- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
35406- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
35407- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
35408- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
35409- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
35410- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
35411- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
35412- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
35413- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
35414+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
35415+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
35416+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
35417+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
35418+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
35419+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
35420+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
35421+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
35422+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
35423 }
35424 ia_cmds.status = 0;
35425 break;
35426@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
35427 if ((desc == 0) || (desc > iadev->num_tx_desc))
35428 {
35429 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
35430- atomic_inc(&vcc->stats->tx);
35431+ atomic_inc_unchecked(&vcc->stats->tx);
35432 if (vcc->pop)
35433 vcc->pop(vcc, skb);
35434 else
35435@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
35436 ATM_DESC(skb) = vcc->vci;
35437 skb_queue_tail(&iadev->tx_dma_q, skb);
35438
35439- atomic_inc(&vcc->stats->tx);
35440+ atomic_inc_unchecked(&vcc->stats->tx);
35441 iadev->tx_pkt_cnt++;
35442 /* Increment transaction counter */
35443 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
35444
35445 #if 0
35446 /* add flow control logic */
35447- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
35448+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
35449 if (iavcc->vc_desc_cnt > 10) {
35450 vcc->tx_quota = vcc->tx_quota * 3 / 4;
35451 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
35452diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
35453index fa7d701..1e404c7 100644
35454--- a/drivers/atm/lanai.c
35455+++ b/drivers/atm/lanai.c
35456@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
35457 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
35458 lanai_endtx(lanai, lvcc);
35459 lanai_free_skb(lvcc->tx.atmvcc, skb);
35460- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
35461+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
35462 }
35463
35464 /* Try to fill the buffer - don't call unless there is backlog */
35465@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
35466 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
35467 __net_timestamp(skb);
35468 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
35469- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
35470+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
35471 out:
35472 lvcc->rx.buf.ptr = end;
35473 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
35474@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35475 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
35476 "vcc %d\n", lanai->number, (unsigned int) s, vci);
35477 lanai->stats.service_rxnotaal5++;
35478- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35479+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35480 return 0;
35481 }
35482 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
35483@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35484 int bytes;
35485 read_unlock(&vcc_sklist_lock);
35486 DPRINTK("got trashed rx pdu on vci %d\n", vci);
35487- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35488+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35489 lvcc->stats.x.aal5.service_trash++;
35490 bytes = (SERVICE_GET_END(s) * 16) -
35491 (((unsigned long) lvcc->rx.buf.ptr) -
35492@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35493 }
35494 if (s & SERVICE_STREAM) {
35495 read_unlock(&vcc_sklist_lock);
35496- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35497+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35498 lvcc->stats.x.aal5.service_stream++;
35499 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
35500 "PDU on VCI %d!\n", lanai->number, vci);
35501@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35502 return 0;
35503 }
35504 DPRINTK("got rx crc error on vci %d\n", vci);
35505- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35506+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35507 lvcc->stats.x.aal5.service_rxcrc++;
35508 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
35509 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
35510diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
35511index 6587dc2..149833d 100644
35512--- a/drivers/atm/nicstar.c
35513+++ b/drivers/atm/nicstar.c
35514@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35515 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
35516 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
35517 card->index);
35518- atomic_inc(&vcc->stats->tx_err);
35519+ atomic_inc_unchecked(&vcc->stats->tx_err);
35520 dev_kfree_skb_any(skb);
35521 return -EINVAL;
35522 }
35523@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35524 if (!vc->tx) {
35525 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
35526 card->index);
35527- atomic_inc(&vcc->stats->tx_err);
35528+ atomic_inc_unchecked(&vcc->stats->tx_err);
35529 dev_kfree_skb_any(skb);
35530 return -EINVAL;
35531 }
35532@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35533 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
35534 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
35535 card->index);
35536- atomic_inc(&vcc->stats->tx_err);
35537+ atomic_inc_unchecked(&vcc->stats->tx_err);
35538 dev_kfree_skb_any(skb);
35539 return -EINVAL;
35540 }
35541
35542 if (skb_shinfo(skb)->nr_frags != 0) {
35543 printk("nicstar%d: No scatter-gather yet.\n", card->index);
35544- atomic_inc(&vcc->stats->tx_err);
35545+ atomic_inc_unchecked(&vcc->stats->tx_err);
35546 dev_kfree_skb_any(skb);
35547 return -EINVAL;
35548 }
35549@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35550 }
35551
35552 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
35553- atomic_inc(&vcc->stats->tx_err);
35554+ atomic_inc_unchecked(&vcc->stats->tx_err);
35555 dev_kfree_skb_any(skb);
35556 return -EIO;
35557 }
35558- atomic_inc(&vcc->stats->tx);
35559+ atomic_inc_unchecked(&vcc->stats->tx);
35560
35561 return 0;
35562 }
35563@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35564 printk
35565 ("nicstar%d: Can't allocate buffers for aal0.\n",
35566 card->index);
35567- atomic_add(i, &vcc->stats->rx_drop);
35568+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
35569 break;
35570 }
35571 if (!atm_charge(vcc, sb->truesize)) {
35572 RXPRINTK
35573 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
35574 card->index);
35575- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
35576+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
35577 dev_kfree_skb_any(sb);
35578 break;
35579 }
35580@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35581 ATM_SKB(sb)->vcc = vcc;
35582 __net_timestamp(sb);
35583 vcc->push(vcc, sb);
35584- atomic_inc(&vcc->stats->rx);
35585+ atomic_inc_unchecked(&vcc->stats->rx);
35586 cell += ATM_CELL_PAYLOAD;
35587 }
35588
35589@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35590 if (iovb == NULL) {
35591 printk("nicstar%d: Out of iovec buffers.\n",
35592 card->index);
35593- atomic_inc(&vcc->stats->rx_drop);
35594+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35595 recycle_rx_buf(card, skb);
35596 return;
35597 }
35598@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35599 small or large buffer itself. */
35600 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
35601 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
35602- atomic_inc(&vcc->stats->rx_err);
35603+ atomic_inc_unchecked(&vcc->stats->rx_err);
35604 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35605 NS_MAX_IOVECS);
35606 NS_PRV_IOVCNT(iovb) = 0;
35607@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35608 ("nicstar%d: Expected a small buffer, and this is not one.\n",
35609 card->index);
35610 which_list(card, skb);
35611- atomic_inc(&vcc->stats->rx_err);
35612+ atomic_inc_unchecked(&vcc->stats->rx_err);
35613 recycle_rx_buf(card, skb);
35614 vc->rx_iov = NULL;
35615 recycle_iov_buf(card, iovb);
35616@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35617 ("nicstar%d: Expected a large buffer, and this is not one.\n",
35618 card->index);
35619 which_list(card, skb);
35620- atomic_inc(&vcc->stats->rx_err);
35621+ atomic_inc_unchecked(&vcc->stats->rx_err);
35622 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35623 NS_PRV_IOVCNT(iovb));
35624 vc->rx_iov = NULL;
35625@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35626 printk(" - PDU size mismatch.\n");
35627 else
35628 printk(".\n");
35629- atomic_inc(&vcc->stats->rx_err);
35630+ atomic_inc_unchecked(&vcc->stats->rx_err);
35631 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35632 NS_PRV_IOVCNT(iovb));
35633 vc->rx_iov = NULL;
35634@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35635 /* skb points to a small buffer */
35636 if (!atm_charge(vcc, skb->truesize)) {
35637 push_rxbufs(card, skb);
35638- atomic_inc(&vcc->stats->rx_drop);
35639+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35640 } else {
35641 skb_put(skb, len);
35642 dequeue_sm_buf(card, skb);
35643@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35644 ATM_SKB(skb)->vcc = vcc;
35645 __net_timestamp(skb);
35646 vcc->push(vcc, skb);
35647- atomic_inc(&vcc->stats->rx);
35648+ atomic_inc_unchecked(&vcc->stats->rx);
35649 }
35650 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
35651 struct sk_buff *sb;
35652@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35653 if (len <= NS_SMBUFSIZE) {
35654 if (!atm_charge(vcc, sb->truesize)) {
35655 push_rxbufs(card, sb);
35656- atomic_inc(&vcc->stats->rx_drop);
35657+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35658 } else {
35659 skb_put(sb, len);
35660 dequeue_sm_buf(card, sb);
35661@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35662 ATM_SKB(sb)->vcc = vcc;
35663 __net_timestamp(sb);
35664 vcc->push(vcc, sb);
35665- atomic_inc(&vcc->stats->rx);
35666+ atomic_inc_unchecked(&vcc->stats->rx);
35667 }
35668
35669 push_rxbufs(card, skb);
35670@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35671
35672 if (!atm_charge(vcc, skb->truesize)) {
35673 push_rxbufs(card, skb);
35674- atomic_inc(&vcc->stats->rx_drop);
35675+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35676 } else {
35677 dequeue_lg_buf(card, skb);
35678 #ifdef NS_USE_DESTRUCTORS
35679@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35680 ATM_SKB(skb)->vcc = vcc;
35681 __net_timestamp(skb);
35682 vcc->push(vcc, skb);
35683- atomic_inc(&vcc->stats->rx);
35684+ atomic_inc_unchecked(&vcc->stats->rx);
35685 }
35686
35687 push_rxbufs(card, sb);
35688@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35689 printk
35690 ("nicstar%d: Out of huge buffers.\n",
35691 card->index);
35692- atomic_inc(&vcc->stats->rx_drop);
35693+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35694 recycle_iovec_rx_bufs(card,
35695 (struct iovec *)
35696 iovb->data,
35697@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35698 card->hbpool.count++;
35699 } else
35700 dev_kfree_skb_any(hb);
35701- atomic_inc(&vcc->stats->rx_drop);
35702+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35703 } else {
35704 /* Copy the small buffer to the huge buffer */
35705 sb = (struct sk_buff *)iov->iov_base;
35706@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35707 #endif /* NS_USE_DESTRUCTORS */
35708 __net_timestamp(hb);
35709 vcc->push(vcc, hb);
35710- atomic_inc(&vcc->stats->rx);
35711+ atomic_inc_unchecked(&vcc->stats->rx);
35712 }
35713 }
35714
35715diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
35716index 32784d1..4a8434a 100644
35717--- a/drivers/atm/solos-pci.c
35718+++ b/drivers/atm/solos-pci.c
35719@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
35720 }
35721 atm_charge(vcc, skb->truesize);
35722 vcc->push(vcc, skb);
35723- atomic_inc(&vcc->stats->rx);
35724+ atomic_inc_unchecked(&vcc->stats->rx);
35725 break;
35726
35727 case PKT_STATUS:
35728@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
35729 vcc = SKB_CB(oldskb)->vcc;
35730
35731 if (vcc) {
35732- atomic_inc(&vcc->stats->tx);
35733+ atomic_inc_unchecked(&vcc->stats->tx);
35734 solos_pop(vcc, oldskb);
35735 } else {
35736 dev_kfree_skb_irq(oldskb);
35737diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
35738index 0215934..ce9f5b1 100644
35739--- a/drivers/atm/suni.c
35740+++ b/drivers/atm/suni.c
35741@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
35742
35743
35744 #define ADD_LIMITED(s,v) \
35745- atomic_add((v),&stats->s); \
35746- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
35747+ atomic_add_unchecked((v),&stats->s); \
35748+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
35749
35750
35751 static void suni_hz(unsigned long from_timer)
35752diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
35753index 5120a96..e2572bd 100644
35754--- a/drivers/atm/uPD98402.c
35755+++ b/drivers/atm/uPD98402.c
35756@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
35757 struct sonet_stats tmp;
35758 int error = 0;
35759
35760- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
35761+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
35762 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
35763 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
35764 if (zero && !error) {
35765@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
35766
35767
35768 #define ADD_LIMITED(s,v) \
35769- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
35770- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
35771- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
35772+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
35773+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
35774+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
35775
35776
35777 static void stat_event(struct atm_dev *dev)
35778@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
35779 if (reason & uPD98402_INT_PFM) stat_event(dev);
35780 if (reason & uPD98402_INT_PCO) {
35781 (void) GET(PCOCR); /* clear interrupt cause */
35782- atomic_add(GET(HECCT),
35783+ atomic_add_unchecked(GET(HECCT),
35784 &PRIV(dev)->sonet_stats.uncorr_hcs);
35785 }
35786 if ((reason & uPD98402_INT_RFO) &&
35787@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
35788 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
35789 uPD98402_INT_LOS),PIMR); /* enable them */
35790 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
35791- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
35792- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
35793- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
35794+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
35795+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
35796+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
35797 return 0;
35798 }
35799
35800diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
35801index 969c3c2..9b72956 100644
35802--- a/drivers/atm/zatm.c
35803+++ b/drivers/atm/zatm.c
35804@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
35805 }
35806 if (!size) {
35807 dev_kfree_skb_irq(skb);
35808- if (vcc) atomic_inc(&vcc->stats->rx_err);
35809+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
35810 continue;
35811 }
35812 if (!atm_charge(vcc,skb->truesize)) {
35813@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
35814 skb->len = size;
35815 ATM_SKB(skb)->vcc = vcc;
35816 vcc->push(vcc,skb);
35817- atomic_inc(&vcc->stats->rx);
35818+ atomic_inc_unchecked(&vcc->stats->rx);
35819 }
35820 zout(pos & 0xffff,MTA(mbx));
35821 #if 0 /* probably a stupid idea */
35822@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
35823 skb_queue_head(&zatm_vcc->backlog,skb);
35824 break;
35825 }
35826- atomic_inc(&vcc->stats->tx);
35827+ atomic_inc_unchecked(&vcc->stats->tx);
35828 wake_up(&zatm_vcc->tx_wait);
35829 }
35830
35831diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
35832index d78b204..ecc1929 100644
35833--- a/drivers/base/attribute_container.c
35834+++ b/drivers/base/attribute_container.c
35835@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
35836 ic->classdev.parent = get_device(dev);
35837 ic->classdev.class = cont->class;
35838 cont->class->dev_release = attribute_container_release;
35839- dev_set_name(&ic->classdev, dev_name(dev));
35840+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
35841 if (fn)
35842 fn(cont, dev, &ic->classdev);
35843 else
35844diff --git a/drivers/base/bus.c b/drivers/base/bus.c
35845index d414331..b4dd4ba 100644
35846--- a/drivers/base/bus.c
35847+++ b/drivers/base/bus.c
35848@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
35849 return -EINVAL;
35850
35851 mutex_lock(&subsys->p->mutex);
35852- list_add_tail(&sif->node, &subsys->p->interfaces);
35853+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
35854 if (sif->add_dev) {
35855 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
35856 while ((dev = subsys_dev_iter_next(&iter)))
35857@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
35858 subsys = sif->subsys;
35859
35860 mutex_lock(&subsys->p->mutex);
35861- list_del_init(&sif->node);
35862+ pax_list_del_init((struct list_head *)&sif->node);
35863 if (sif->remove_dev) {
35864 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
35865 while ((dev = subsys_dev_iter_next(&iter)))
35866diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
35867index 7413d06..79155fa 100644
35868--- a/drivers/base/devtmpfs.c
35869+++ b/drivers/base/devtmpfs.c
35870@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
35871 if (!thread)
35872 return 0;
35873
35874- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
35875+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
35876 if (err)
35877 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
35878 else
35879@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
35880 *err = sys_unshare(CLONE_NEWNS);
35881 if (*err)
35882 goto out;
35883- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
35884+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
35885 if (*err)
35886 goto out;
35887- sys_chdir("/.."); /* will traverse into overmounted root */
35888- sys_chroot(".");
35889+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
35890+ sys_chroot((char __force_user *)".");
35891 complete(&setup_done);
35892 while (1) {
35893 spin_lock(&req_lock);
35894diff --git a/drivers/base/node.c b/drivers/base/node.c
35895index 7616a77c..8f57f51 100644
35896--- a/drivers/base/node.c
35897+++ b/drivers/base/node.c
35898@@ -626,7 +626,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
35899 struct node_attr {
35900 struct device_attribute attr;
35901 enum node_states state;
35902-};
35903+} __do_const;
35904
35905 static ssize_t show_node_state(struct device *dev,
35906 struct device_attribute *attr, char *buf)
35907diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
35908index 7072404..76dcebd 100644
35909--- a/drivers/base/power/domain.c
35910+++ b/drivers/base/power/domain.c
35911@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
35912 {
35913 struct cpuidle_driver *cpuidle_drv;
35914 struct gpd_cpu_data *cpu_data;
35915- struct cpuidle_state *idle_state;
35916+ cpuidle_state_no_const *idle_state;
35917 int ret = 0;
35918
35919 if (IS_ERR_OR_NULL(genpd) || state < 0)
35920@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
35921 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
35922 {
35923 struct gpd_cpu_data *cpu_data;
35924- struct cpuidle_state *idle_state;
35925+ cpuidle_state_no_const *idle_state;
35926 int ret = 0;
35927
35928 if (IS_ERR_OR_NULL(genpd))
35929diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
35930index a53ebd2..8f73eeb 100644
35931--- a/drivers/base/power/sysfs.c
35932+++ b/drivers/base/power/sysfs.c
35933@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
35934 return -EIO;
35935 }
35936 }
35937- return sprintf(buf, p);
35938+ return sprintf(buf, "%s", p);
35939 }
35940
35941 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
35942diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
35943index 79715e7..df06b3b 100644
35944--- a/drivers/base/power/wakeup.c
35945+++ b/drivers/base/power/wakeup.c
35946@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
35947 * They need to be modified together atomically, so it's better to use one
35948 * atomic variable to hold them both.
35949 */
35950-static atomic_t combined_event_count = ATOMIC_INIT(0);
35951+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
35952
35953 #define IN_PROGRESS_BITS (sizeof(int) * 4)
35954 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
35955
35956 static void split_counters(unsigned int *cnt, unsigned int *inpr)
35957 {
35958- unsigned int comb = atomic_read(&combined_event_count);
35959+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
35960
35961 *cnt = (comb >> IN_PROGRESS_BITS);
35962 *inpr = comb & MAX_IN_PROGRESS;
35963@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
35964 ws->start_prevent_time = ws->last_time;
35965
35966 /* Increment the counter of events in progress. */
35967- cec = atomic_inc_return(&combined_event_count);
35968+ cec = atomic_inc_return_unchecked(&combined_event_count);
35969
35970 trace_wakeup_source_activate(ws->name, cec);
35971 }
35972@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
35973 * Increment the counter of registered wakeup events and decrement the
35974 * couter of wakeup events in progress simultaneously.
35975 */
35976- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
35977+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
35978 trace_wakeup_source_deactivate(ws->name, cec);
35979
35980 split_counters(&cnt, &inpr);
35981diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
35982index e8d11b6..7b1b36f 100644
35983--- a/drivers/base/syscore.c
35984+++ b/drivers/base/syscore.c
35985@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
35986 void register_syscore_ops(struct syscore_ops *ops)
35987 {
35988 mutex_lock(&syscore_ops_lock);
35989- list_add_tail(&ops->node, &syscore_ops_list);
35990+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
35991 mutex_unlock(&syscore_ops_lock);
35992 }
35993 EXPORT_SYMBOL_GPL(register_syscore_ops);
35994@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
35995 void unregister_syscore_ops(struct syscore_ops *ops)
35996 {
35997 mutex_lock(&syscore_ops_lock);
35998- list_del(&ops->node);
35999+ pax_list_del((struct list_head *)&ops->node);
36000 mutex_unlock(&syscore_ops_lock);
36001 }
36002 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
36003diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
36004index 62b6c2c..4a11354 100644
36005--- a/drivers/block/cciss.c
36006+++ b/drivers/block/cciss.c
36007@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
36008 int err;
36009 u32 cp;
36010
36011+ memset(&arg64, 0, sizeof(arg64));
36012+
36013 err = 0;
36014 err |=
36015 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
36016@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
36017 while (!list_empty(&h->reqQ)) {
36018 c = list_entry(h->reqQ.next, CommandList_struct, list);
36019 /* can't do anything if fifo is full */
36020- if ((h->access.fifo_full(h))) {
36021+ if ((h->access->fifo_full(h))) {
36022 dev_warn(&h->pdev->dev, "fifo full\n");
36023 break;
36024 }
36025@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
36026 h->Qdepth--;
36027
36028 /* Tell the controller execute command */
36029- h->access.submit_command(h, c);
36030+ h->access->submit_command(h, c);
36031
36032 /* Put job onto the completed Q */
36033 addQ(&h->cmpQ, c);
36034@@ -3446,17 +3448,17 @@ startio:
36035
36036 static inline unsigned long get_next_completion(ctlr_info_t *h)
36037 {
36038- return h->access.command_completed(h);
36039+ return h->access->command_completed(h);
36040 }
36041
36042 static inline int interrupt_pending(ctlr_info_t *h)
36043 {
36044- return h->access.intr_pending(h);
36045+ return h->access->intr_pending(h);
36046 }
36047
36048 static inline long interrupt_not_for_us(ctlr_info_t *h)
36049 {
36050- return ((h->access.intr_pending(h) == 0) ||
36051+ return ((h->access->intr_pending(h) == 0) ||
36052 (h->interrupts_enabled == 0));
36053 }
36054
36055@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
36056 u32 a;
36057
36058 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36059- return h->access.command_completed(h);
36060+ return h->access->command_completed(h);
36061
36062 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36063 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36064@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
36065 trans_support & CFGTBL_Trans_use_short_tags);
36066
36067 /* Change the access methods to the performant access methods */
36068- h->access = SA5_performant_access;
36069+ h->access = &SA5_performant_access;
36070 h->transMethod = CFGTBL_Trans_Performant;
36071
36072 return;
36073@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
36074 if (prod_index < 0)
36075 return -ENODEV;
36076 h->product_name = products[prod_index].product_name;
36077- h->access = *(products[prod_index].access);
36078+ h->access = products[prod_index].access;
36079
36080 if (cciss_board_disabled(h)) {
36081 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36082@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
36083 }
36084
36085 /* make sure the board interrupts are off */
36086- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36087+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36088 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
36089 if (rc)
36090 goto clean2;
36091@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
36092 * fake ones to scoop up any residual completions.
36093 */
36094 spin_lock_irqsave(&h->lock, flags);
36095- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36096+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36097 spin_unlock_irqrestore(&h->lock, flags);
36098 free_irq(h->intr[h->intr_mode], h);
36099 rc = cciss_request_irq(h, cciss_msix_discard_completions,
36100@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
36101 dev_info(&h->pdev->dev, "Board READY.\n");
36102 dev_info(&h->pdev->dev,
36103 "Waiting for stale completions to drain.\n");
36104- h->access.set_intr_mask(h, CCISS_INTR_ON);
36105+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36106 msleep(10000);
36107- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36108+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36109
36110 rc = controller_reset_failed(h->cfgtable);
36111 if (rc)
36112@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
36113 cciss_scsi_setup(h);
36114
36115 /* Turn the interrupts on so we can service requests */
36116- h->access.set_intr_mask(h, CCISS_INTR_ON);
36117+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36118
36119 /* Get the firmware version */
36120 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
36121@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
36122 kfree(flush_buf);
36123 if (return_code != IO_OK)
36124 dev_warn(&h->pdev->dev, "Error flushing cache\n");
36125- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36126+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36127 free_irq(h->intr[h->intr_mode], h);
36128 }
36129
36130diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
36131index 7fda30e..eb5dfe0 100644
36132--- a/drivers/block/cciss.h
36133+++ b/drivers/block/cciss.h
36134@@ -101,7 +101,7 @@ struct ctlr_info
36135 /* information about each logical volume */
36136 drive_info_struct *drv[CISS_MAX_LUN];
36137
36138- struct access_method access;
36139+ struct access_method *access;
36140
36141 /* queue and queue Info */
36142 struct list_head reqQ;
36143diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
36144index 639d26b..fd6ad1f 100644
36145--- a/drivers/block/cpqarray.c
36146+++ b/drivers/block/cpqarray.c
36147@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36148 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
36149 goto Enomem4;
36150 }
36151- hba[i]->access.set_intr_mask(hba[i], 0);
36152+ hba[i]->access->set_intr_mask(hba[i], 0);
36153 if (request_irq(hba[i]->intr, do_ida_intr,
36154 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
36155 {
36156@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36157 add_timer(&hba[i]->timer);
36158
36159 /* Enable IRQ now that spinlock and rate limit timer are set up */
36160- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36161+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36162
36163 for(j=0; j<NWD; j++) {
36164 struct gendisk *disk = ida_gendisk[i][j];
36165@@ -694,7 +694,7 @@ DBGINFO(
36166 for(i=0; i<NR_PRODUCTS; i++) {
36167 if (board_id == products[i].board_id) {
36168 c->product_name = products[i].product_name;
36169- c->access = *(products[i].access);
36170+ c->access = products[i].access;
36171 break;
36172 }
36173 }
36174@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
36175 hba[ctlr]->intr = intr;
36176 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
36177 hba[ctlr]->product_name = products[j].product_name;
36178- hba[ctlr]->access = *(products[j].access);
36179+ hba[ctlr]->access = products[j].access;
36180 hba[ctlr]->ctlr = ctlr;
36181 hba[ctlr]->board_id = board_id;
36182 hba[ctlr]->pci_dev = NULL; /* not PCI */
36183@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
36184
36185 while((c = h->reqQ) != NULL) {
36186 /* Can't do anything if we're busy */
36187- if (h->access.fifo_full(h) == 0)
36188+ if (h->access->fifo_full(h) == 0)
36189 return;
36190
36191 /* Get the first entry from the request Q */
36192@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
36193 h->Qdepth--;
36194
36195 /* Tell the controller to do our bidding */
36196- h->access.submit_command(h, c);
36197+ h->access->submit_command(h, c);
36198
36199 /* Get onto the completion Q */
36200 addQ(&h->cmpQ, c);
36201@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36202 unsigned long flags;
36203 __u32 a,a1;
36204
36205- istat = h->access.intr_pending(h);
36206+ istat = h->access->intr_pending(h);
36207 /* Is this interrupt for us? */
36208 if (istat == 0)
36209 return IRQ_NONE;
36210@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36211 */
36212 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
36213 if (istat & FIFO_NOT_EMPTY) {
36214- while((a = h->access.command_completed(h))) {
36215+ while((a = h->access->command_completed(h))) {
36216 a1 = a; a &= ~3;
36217 if ((c = h->cmpQ) == NULL)
36218 {
36219@@ -1193,6 +1193,7 @@ out_passthru:
36220 ida_pci_info_struct pciinfo;
36221
36222 if (!arg) return -EINVAL;
36223+ memset(&pciinfo, 0, sizeof(pciinfo));
36224 pciinfo.bus = host->pci_dev->bus->number;
36225 pciinfo.dev_fn = host->pci_dev->devfn;
36226 pciinfo.board_id = host->board_id;
36227@@ -1447,11 +1448,11 @@ static int sendcmd(
36228 /*
36229 * Disable interrupt
36230 */
36231- info_p->access.set_intr_mask(info_p, 0);
36232+ info_p->access->set_intr_mask(info_p, 0);
36233 /* Make sure there is room in the command FIFO */
36234 /* Actually it should be completely empty at this time. */
36235 for (i = 200000; i > 0; i--) {
36236- temp = info_p->access.fifo_full(info_p);
36237+ temp = info_p->access->fifo_full(info_p);
36238 if (temp != 0) {
36239 break;
36240 }
36241@@ -1464,7 +1465,7 @@ DBG(
36242 /*
36243 * Send the cmd
36244 */
36245- info_p->access.submit_command(info_p, c);
36246+ info_p->access->submit_command(info_p, c);
36247 complete = pollcomplete(ctlr);
36248
36249 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
36250@@ -1547,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
36251 * we check the new geometry. Then turn interrupts back on when
36252 * we're done.
36253 */
36254- host->access.set_intr_mask(host, 0);
36255+ host->access->set_intr_mask(host, 0);
36256 getgeometry(ctlr);
36257- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
36258+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
36259
36260 for(i=0; i<NWD; i++) {
36261 struct gendisk *disk = ida_gendisk[ctlr][i];
36262@@ -1589,7 +1590,7 @@ static int pollcomplete(int ctlr)
36263 /* Wait (up to 2 seconds) for a command to complete */
36264
36265 for (i = 200000; i > 0; i--) {
36266- done = hba[ctlr]->access.command_completed(hba[ctlr]);
36267+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
36268 if (done == 0) {
36269 udelay(10); /* a short fixed delay */
36270 } else
36271diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
36272index be73e9d..7fbf140 100644
36273--- a/drivers/block/cpqarray.h
36274+++ b/drivers/block/cpqarray.h
36275@@ -99,7 +99,7 @@ struct ctlr_info {
36276 drv_info_t drv[NWD];
36277 struct proc_dir_entry *proc;
36278
36279- struct access_method access;
36280+ struct access_method *access;
36281
36282 cmdlist_t *reqQ;
36283 cmdlist_t *cmpQ;
36284diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
36285index f943aac..99bfd19 100644
36286--- a/drivers/block/drbd/drbd_int.h
36287+++ b/drivers/block/drbd/drbd_int.h
36288@@ -582,7 +582,7 @@ struct drbd_epoch {
36289 struct drbd_tconn *tconn;
36290 struct list_head list;
36291 unsigned int barrier_nr;
36292- atomic_t epoch_size; /* increased on every request added. */
36293+ atomic_unchecked_t epoch_size; /* increased on every request added. */
36294 atomic_t active; /* increased on every req. added, and dec on every finished. */
36295 unsigned long flags;
36296 };
36297@@ -1021,7 +1021,7 @@ struct drbd_conf {
36298 unsigned int al_tr_number;
36299 int al_tr_cycle;
36300 wait_queue_head_t seq_wait;
36301- atomic_t packet_seq;
36302+ atomic_unchecked_t packet_seq;
36303 unsigned int peer_seq;
36304 spinlock_t peer_seq_lock;
36305 unsigned int minor;
36306@@ -1562,7 +1562,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
36307 char __user *uoptval;
36308 int err;
36309
36310- uoptval = (char __user __force *)optval;
36311+ uoptval = (char __force_user *)optval;
36312
36313 set_fs(KERNEL_DS);
36314 if (level == SOL_SOCKET)
36315diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
36316index a5dca6a..bb27967 100644
36317--- a/drivers/block/drbd/drbd_main.c
36318+++ b/drivers/block/drbd/drbd_main.c
36319@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
36320 p->sector = sector;
36321 p->block_id = block_id;
36322 p->blksize = blksize;
36323- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36324+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36325 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
36326 }
36327
36328@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
36329 return -EIO;
36330 p->sector = cpu_to_be64(req->i.sector);
36331 p->block_id = (unsigned long)req;
36332- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36333+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36334 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
36335 if (mdev->state.conn >= C_SYNC_SOURCE &&
36336 mdev->state.conn <= C_PAUSED_SYNC_T)
36337@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
36338 {
36339 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
36340
36341- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
36342- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
36343+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
36344+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
36345 kfree(tconn->current_epoch);
36346
36347 idr_destroy(&tconn->volumes);
36348diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
36349index 4222aff..1f79506 100644
36350--- a/drivers/block/drbd/drbd_receiver.c
36351+++ b/drivers/block/drbd/drbd_receiver.c
36352@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
36353 {
36354 int err;
36355
36356- atomic_set(&mdev->packet_seq, 0);
36357+ atomic_set_unchecked(&mdev->packet_seq, 0);
36358 mdev->peer_seq = 0;
36359
36360 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
36361@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36362 do {
36363 next_epoch = NULL;
36364
36365- epoch_size = atomic_read(&epoch->epoch_size);
36366+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
36367
36368 switch (ev & ~EV_CLEANUP) {
36369 case EV_PUT:
36370@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36371 rv = FE_DESTROYED;
36372 } else {
36373 epoch->flags = 0;
36374- atomic_set(&epoch->epoch_size, 0);
36375+ atomic_set_unchecked(&epoch->epoch_size, 0);
36376 /* atomic_set(&epoch->active, 0); is already zero */
36377 if (rv == FE_STILL_LIVE)
36378 rv = FE_RECYCLED;
36379@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36380 conn_wait_active_ee_empty(tconn);
36381 drbd_flush(tconn);
36382
36383- if (atomic_read(&tconn->current_epoch->epoch_size)) {
36384+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36385 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
36386 if (epoch)
36387 break;
36388@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36389 }
36390
36391 epoch->flags = 0;
36392- atomic_set(&epoch->epoch_size, 0);
36393+ atomic_set_unchecked(&epoch->epoch_size, 0);
36394 atomic_set(&epoch->active, 0);
36395
36396 spin_lock(&tconn->epoch_lock);
36397- if (atomic_read(&tconn->current_epoch->epoch_size)) {
36398+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36399 list_add(&epoch->list, &tconn->current_epoch->list);
36400 tconn->current_epoch = epoch;
36401 tconn->epochs++;
36402@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
36403
36404 err = wait_for_and_update_peer_seq(mdev, peer_seq);
36405 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
36406- atomic_inc(&tconn->current_epoch->epoch_size);
36407+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
36408 err2 = drbd_drain_block(mdev, pi->size);
36409 if (!err)
36410 err = err2;
36411@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
36412
36413 spin_lock(&tconn->epoch_lock);
36414 peer_req->epoch = tconn->current_epoch;
36415- atomic_inc(&peer_req->epoch->epoch_size);
36416+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
36417 atomic_inc(&peer_req->epoch->active);
36418 spin_unlock(&tconn->epoch_lock);
36419
36420@@ -4347,7 +4347,7 @@ struct data_cmd {
36421 int expect_payload;
36422 size_t pkt_size;
36423 int (*fn)(struct drbd_tconn *, struct packet_info *);
36424-};
36425+} __do_const;
36426
36427 static struct data_cmd drbd_cmd_handler[] = {
36428 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
36429@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
36430 if (!list_empty(&tconn->current_epoch->list))
36431 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
36432 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
36433- atomic_set(&tconn->current_epoch->epoch_size, 0);
36434+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
36435 tconn->send.seen_any_write_yet = false;
36436
36437 conn_info(tconn, "Connection closed\n");
36438@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
36439 struct asender_cmd {
36440 size_t pkt_size;
36441 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
36442-};
36443+} __do_const;
36444
36445 static struct asender_cmd asender_tbl[] = {
36446 [P_PING] = { 0, got_Ping },
36447diff --git a/drivers/block/loop.c b/drivers/block/loop.c
36448index d92d50f..a7e9d97 100644
36449--- a/drivers/block/loop.c
36450+++ b/drivers/block/loop.c
36451@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
36452
36453 file_start_write(file);
36454 set_fs(get_ds());
36455- bw = file->f_op->write(file, buf, len, &pos);
36456+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
36457 set_fs(old_fs);
36458 file_end_write(file);
36459 if (likely(bw == len))
36460diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
36461index f5d0ea1..c62380a 100644
36462--- a/drivers/block/pktcdvd.c
36463+++ b/drivers/block/pktcdvd.c
36464@@ -84,7 +84,7 @@
36465 #define MAX_SPEED 0xffff
36466
36467 #define ZONE(sector, pd) (((sector) + (pd)->offset) & \
36468- ~(sector_t)((pd)->settings.size - 1))
36469+ ~(sector_t)((pd)->settings.size - 1UL))
36470
36471 static DEFINE_MUTEX(pktcdvd_mutex);
36472 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
36473diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
36474index 8a3aff7..d7538c2 100644
36475--- a/drivers/cdrom/cdrom.c
36476+++ b/drivers/cdrom/cdrom.c
36477@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
36478 ENSURE(reset, CDC_RESET);
36479 ENSURE(generic_packet, CDC_GENERIC_PACKET);
36480 cdi->mc_flags = 0;
36481- cdo->n_minors = 0;
36482 cdi->options = CDO_USE_FFLAGS;
36483
36484 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
36485@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
36486 else
36487 cdi->cdda_method = CDDA_OLD;
36488
36489- if (!cdo->generic_packet)
36490- cdo->generic_packet = cdrom_dummy_generic_packet;
36491+ if (!cdo->generic_packet) {
36492+ pax_open_kernel();
36493+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
36494+ pax_close_kernel();
36495+ }
36496
36497 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
36498 mutex_lock(&cdrom_mutex);
36499@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
36500 if (cdi->exit)
36501 cdi->exit(cdi);
36502
36503- cdi->ops->n_minors--;
36504 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
36505 }
36506
36507@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
36508 */
36509 nr = nframes;
36510 do {
36511- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
36512+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
36513 if (cgc.buffer)
36514 break;
36515
36516@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
36517 struct cdrom_device_info *cdi;
36518 int ret;
36519
36520- ret = scnprintf(info + *pos, max_size - *pos, header);
36521+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
36522 if (!ret)
36523 return 1;
36524
36525diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
36526index 4afcb65..a68a32d 100644
36527--- a/drivers/cdrom/gdrom.c
36528+++ b/drivers/cdrom/gdrom.c
36529@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
36530 .audio_ioctl = gdrom_audio_ioctl,
36531 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
36532 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
36533- .n_minors = 1,
36534 };
36535
36536 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
36537diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
36538index 3bb6fa3..34013fb 100644
36539--- a/drivers/char/Kconfig
36540+++ b/drivers/char/Kconfig
36541@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
36542
36543 config DEVKMEM
36544 bool "/dev/kmem virtual device support"
36545- default y
36546+ default n
36547+ depends on !GRKERNSEC_KMEM
36548 help
36549 Say Y here if you want to support the /dev/kmem device. The
36550 /dev/kmem device is rarely used, but can be used for certain
36551@@ -582,6 +583,7 @@ config DEVPORT
36552 bool
36553 depends on !M68K
36554 depends on ISA || PCI
36555+ depends on !GRKERNSEC_KMEM
36556 default y
36557
36558 source "drivers/s390/char/Kconfig"
36559diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
36560index a48e05b..6bac831 100644
36561--- a/drivers/char/agp/compat_ioctl.c
36562+++ b/drivers/char/agp/compat_ioctl.c
36563@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
36564 return -ENOMEM;
36565 }
36566
36567- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
36568+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
36569 sizeof(*usegment) * ureserve.seg_count)) {
36570 kfree(usegment);
36571 kfree(ksegment);
36572diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
36573index 2e04433..771f2cc 100644
36574--- a/drivers/char/agp/frontend.c
36575+++ b/drivers/char/agp/frontend.c
36576@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
36577 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
36578 return -EFAULT;
36579
36580- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
36581+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
36582 return -EFAULT;
36583
36584 client = agp_find_client_by_pid(reserve.pid);
36585@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
36586 if (segment == NULL)
36587 return -ENOMEM;
36588
36589- if (copy_from_user(segment, (void __user *) reserve.seg_list,
36590+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
36591 sizeof(struct agp_segment) * reserve.seg_count)) {
36592 kfree(segment);
36593 return -EFAULT;
36594diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
36595index 4f94375..413694e 100644
36596--- a/drivers/char/genrtc.c
36597+++ b/drivers/char/genrtc.c
36598@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
36599 switch (cmd) {
36600
36601 case RTC_PLL_GET:
36602+ memset(&pll, 0, sizeof(pll));
36603 if (get_rtc_pll(&pll))
36604 return -EINVAL;
36605 else
36606diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
36607index d784650..e8bfd69 100644
36608--- a/drivers/char/hpet.c
36609+++ b/drivers/char/hpet.c
36610@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
36611 }
36612
36613 static int
36614-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
36615+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
36616 struct hpet_info *info)
36617 {
36618 struct hpet_timer __iomem *timer;
36619diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
36620index 86fe45c..c0ea948 100644
36621--- a/drivers/char/hw_random/intel-rng.c
36622+++ b/drivers/char/hw_random/intel-rng.c
36623@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
36624
36625 if (no_fwh_detect)
36626 return -ENODEV;
36627- printk(warning);
36628+ printk("%s", warning);
36629 return -EBUSY;
36630 }
36631
36632diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
36633index 4445fa1..7c6de37 100644
36634--- a/drivers/char/ipmi/ipmi_msghandler.c
36635+++ b/drivers/char/ipmi/ipmi_msghandler.c
36636@@ -420,7 +420,7 @@ struct ipmi_smi {
36637 struct proc_dir_entry *proc_dir;
36638 char proc_dir_name[10];
36639
36640- atomic_t stats[IPMI_NUM_STATS];
36641+ atomic_unchecked_t stats[IPMI_NUM_STATS];
36642
36643 /*
36644 * run_to_completion duplicate of smb_info, smi_info
36645@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
36646
36647
36648 #define ipmi_inc_stat(intf, stat) \
36649- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
36650+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
36651 #define ipmi_get_stat(intf, stat) \
36652- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
36653+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
36654
36655 static int is_lan_addr(struct ipmi_addr *addr)
36656 {
36657@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
36658 INIT_LIST_HEAD(&intf->cmd_rcvrs);
36659 init_waitqueue_head(&intf->waitq);
36660 for (i = 0; i < IPMI_NUM_STATS; i++)
36661- atomic_set(&intf->stats[i], 0);
36662+ atomic_set_unchecked(&intf->stats[i], 0);
36663
36664 intf->proc_dir = NULL;
36665
36666diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
36667index af4b23f..79806fc 100644
36668--- a/drivers/char/ipmi/ipmi_si_intf.c
36669+++ b/drivers/char/ipmi/ipmi_si_intf.c
36670@@ -275,7 +275,7 @@ struct smi_info {
36671 unsigned char slave_addr;
36672
36673 /* Counters and things for the proc filesystem. */
36674- atomic_t stats[SI_NUM_STATS];
36675+ atomic_unchecked_t stats[SI_NUM_STATS];
36676
36677 struct task_struct *thread;
36678
36679@@ -284,9 +284,9 @@ struct smi_info {
36680 };
36681
36682 #define smi_inc_stat(smi, stat) \
36683- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
36684+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
36685 #define smi_get_stat(smi, stat) \
36686- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
36687+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
36688
36689 #define SI_MAX_PARMS 4
36690
36691@@ -3258,7 +3258,7 @@ static int try_smi_init(struct smi_info *new_smi)
36692 atomic_set(&new_smi->req_events, 0);
36693 new_smi->run_to_completion = 0;
36694 for (i = 0; i < SI_NUM_STATS; i++)
36695- atomic_set(&new_smi->stats[i], 0);
36696+ atomic_set_unchecked(&new_smi->stats[i], 0);
36697
36698 new_smi->interrupt_disabled = 1;
36699 atomic_set(&new_smi->stop_operation, 0);
36700diff --git a/drivers/char/mem.c b/drivers/char/mem.c
36701index 1ccbe94..6ad651a 100644
36702--- a/drivers/char/mem.c
36703+++ b/drivers/char/mem.c
36704@@ -18,6 +18,7 @@
36705 #include <linux/raw.h>
36706 #include <linux/tty.h>
36707 #include <linux/capability.h>
36708+#include <linux/security.h>
36709 #include <linux/ptrace.h>
36710 #include <linux/device.h>
36711 #include <linux/highmem.h>
36712@@ -38,6 +39,10 @@
36713
36714 #define DEVPORT_MINOR 4
36715
36716+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
36717+extern const struct file_operations grsec_fops;
36718+#endif
36719+
36720 static inline unsigned long size_inside_page(unsigned long start,
36721 unsigned long size)
36722 {
36723@@ -69,9 +74,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36724
36725 while (cursor < to) {
36726 if (!devmem_is_allowed(pfn)) {
36727+#ifdef CONFIG_GRKERNSEC_KMEM
36728+ gr_handle_mem_readwrite(from, to);
36729+#else
36730 printk(KERN_INFO
36731 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
36732 current->comm, from, to);
36733+#endif
36734 return 0;
36735 }
36736 cursor += PAGE_SIZE;
36737@@ -79,6 +88,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36738 }
36739 return 1;
36740 }
36741+#elif defined(CONFIG_GRKERNSEC_KMEM)
36742+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36743+{
36744+ return 0;
36745+}
36746 #else
36747 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36748 {
36749@@ -121,6 +135,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
36750
36751 while (count > 0) {
36752 unsigned long remaining;
36753+ char *temp;
36754
36755 sz = size_inside_page(p, count);
36756
36757@@ -136,7 +151,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
36758 if (!ptr)
36759 return -EFAULT;
36760
36761- remaining = copy_to_user(buf, ptr, sz);
36762+#ifdef CONFIG_PAX_USERCOPY
36763+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
36764+ if (!temp) {
36765+ unxlate_dev_mem_ptr(p, ptr);
36766+ return -ENOMEM;
36767+ }
36768+ memcpy(temp, ptr, sz);
36769+#else
36770+ temp = ptr;
36771+#endif
36772+
36773+ remaining = copy_to_user(buf, temp, sz);
36774+
36775+#ifdef CONFIG_PAX_USERCOPY
36776+ kfree(temp);
36777+#endif
36778+
36779 unxlate_dev_mem_ptr(p, ptr);
36780 if (remaining)
36781 return -EFAULT;
36782@@ -379,7 +410,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
36783 else
36784 csize = count;
36785
36786- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
36787+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
36788 if (rc < 0)
36789 return rc;
36790 buf += csize;
36791@@ -399,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36792 size_t count, loff_t *ppos)
36793 {
36794 unsigned long p = *ppos;
36795- ssize_t low_count, read, sz;
36796+ ssize_t low_count, read, sz, err = 0;
36797 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
36798- int err = 0;
36799
36800 read = 0;
36801 if (p < (unsigned long) high_memory) {
36802@@ -423,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36803 }
36804 #endif
36805 while (low_count > 0) {
36806+ char *temp;
36807+
36808 sz = size_inside_page(p, low_count);
36809
36810 /*
36811@@ -432,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36812 */
36813 kbuf = xlate_dev_kmem_ptr((char *)p);
36814
36815- if (copy_to_user(buf, kbuf, sz))
36816+#ifdef CONFIG_PAX_USERCOPY
36817+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
36818+ if (!temp)
36819+ return -ENOMEM;
36820+ memcpy(temp, kbuf, sz);
36821+#else
36822+ temp = kbuf;
36823+#endif
36824+
36825+ err = copy_to_user(buf, temp, sz);
36826+
36827+#ifdef CONFIG_PAX_USERCOPY
36828+ kfree(temp);
36829+#endif
36830+
36831+ if (err)
36832 return -EFAULT;
36833 buf += sz;
36834 p += sz;
36835@@ -869,6 +916,9 @@ static const struct memdev {
36836 #ifdef CONFIG_CRASH_DUMP
36837 [12] = { "oldmem", 0, &oldmem_fops, NULL },
36838 #endif
36839+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
36840+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
36841+#endif
36842 };
36843
36844 static int memory_open(struct inode *inode, struct file *filp)
36845@@ -940,7 +990,7 @@ static int __init chr_dev_init(void)
36846 continue;
36847
36848 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
36849- NULL, devlist[minor].name);
36850+ NULL, "%s", devlist[minor].name);
36851 }
36852
36853 return tty_init();
36854diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
36855index c689697..04e6d6a2 100644
36856--- a/drivers/char/mwave/tp3780i.c
36857+++ b/drivers/char/mwave/tp3780i.c
36858@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
36859 PRINTK_2(TRACE_TP3780I,
36860 "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
36861
36862+ memset(pAbilities, 0, sizeof(*pAbilities));
36863 /* fill out standard constant fields */
36864 pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
36865 pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
36866diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
36867index 9df78e2..01ba9ae 100644
36868--- a/drivers/char/nvram.c
36869+++ b/drivers/char/nvram.c
36870@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
36871
36872 spin_unlock_irq(&rtc_lock);
36873
36874- if (copy_to_user(buf, contents, tmp - contents))
36875+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
36876 return -EFAULT;
36877
36878 *ppos = i;
36879diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
36880index 5c5cc00..ac9edb7 100644
36881--- a/drivers/char/pcmcia/synclink_cs.c
36882+++ b/drivers/char/pcmcia/synclink_cs.c
36883@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36884
36885 if (debug_level >= DEBUG_LEVEL_INFO)
36886 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
36887- __FILE__, __LINE__, info->device_name, port->count);
36888+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
36889
36890- WARN_ON(!port->count);
36891+ WARN_ON(!atomic_read(&port->count));
36892
36893 if (tty_port_close_start(port, tty, filp) == 0)
36894 goto cleanup;
36895@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36896 cleanup:
36897 if (debug_level >= DEBUG_LEVEL_INFO)
36898 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
36899- tty->driver->name, port->count);
36900+ tty->driver->name, atomic_read(&port->count));
36901 }
36902
36903 /* Wait until the transmitter is empty.
36904@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36905
36906 if (debug_level >= DEBUG_LEVEL_INFO)
36907 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
36908- __FILE__, __LINE__, tty->driver->name, port->count);
36909+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
36910
36911 /* If port is closing, signal caller to try again */
36912 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
36913@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36914 goto cleanup;
36915 }
36916 spin_lock(&port->lock);
36917- port->count++;
36918+ atomic_inc(&port->count);
36919 spin_unlock(&port->lock);
36920 spin_unlock_irqrestore(&info->netlock, flags);
36921
36922- if (port->count == 1) {
36923+ if (atomic_read(&port->count) == 1) {
36924 /* 1st open on this device, init hardware */
36925 retval = startup(info, tty);
36926 if (retval < 0)
36927@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
36928 unsigned short new_crctype;
36929
36930 /* return error if TTY interface open */
36931- if (info->port.count)
36932+ if (atomic_read(&info->port.count))
36933 return -EBUSY;
36934
36935 switch (encoding)
36936@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
36937
36938 /* arbitrate between network and tty opens */
36939 spin_lock_irqsave(&info->netlock, flags);
36940- if (info->port.count != 0 || info->netcount != 0) {
36941+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
36942 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
36943 spin_unlock_irqrestore(&info->netlock, flags);
36944 return -EBUSY;
36945@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36946 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
36947
36948 /* return error if TTY interface open */
36949- if (info->port.count)
36950+ if (atomic_read(&info->port.count))
36951 return -EBUSY;
36952
36953 if (cmd != SIOCWANDEV)
36954diff --git a/drivers/char/random.c b/drivers/char/random.c
36955index 35487e8..dac8bd1 100644
36956--- a/drivers/char/random.c
36957+++ b/drivers/char/random.c
36958@@ -272,8 +272,13 @@
36959 /*
36960 * Configuration information
36961 */
36962+#ifdef CONFIG_GRKERNSEC_RANDNET
36963+#define INPUT_POOL_WORDS 512
36964+#define OUTPUT_POOL_WORDS 128
36965+#else
36966 #define INPUT_POOL_WORDS 128
36967 #define OUTPUT_POOL_WORDS 32
36968+#endif
36969 #define SEC_XFER_SIZE 512
36970 #define EXTRACT_SIZE 10
36971
36972@@ -313,10 +318,17 @@ static struct poolinfo {
36973 int poolwords;
36974 int tap1, tap2, tap3, tap4, tap5;
36975 } poolinfo_table[] = {
36976+#ifdef CONFIG_GRKERNSEC_RANDNET
36977+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
36978+ { 512, 411, 308, 208, 104, 1 },
36979+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
36980+ { 128, 103, 76, 51, 25, 1 },
36981+#else
36982 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
36983 { 128, 103, 76, 51, 25, 1 },
36984 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
36985 { 32, 26, 20, 14, 7, 1 },
36986+#endif
36987 #if 0
36988 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
36989 { 2048, 1638, 1231, 819, 411, 1 },
36990@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
36991 input_rotate += i ? 7 : 14;
36992 }
36993
36994- ACCESS_ONCE(r->input_rotate) = input_rotate;
36995- ACCESS_ONCE(r->add_ptr) = i;
36996+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
36997+ ACCESS_ONCE_RW(r->add_ptr) = i;
36998 smp_wmb();
36999
37000 if (out)
37001@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
37002
37003 extract_buf(r, tmp);
37004 i = min_t(int, nbytes, EXTRACT_SIZE);
37005- if (copy_to_user(buf, tmp, i)) {
37006+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
37007 ret = -EFAULT;
37008 break;
37009 }
37010@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
37011 #include <linux/sysctl.h>
37012
37013 static int min_read_thresh = 8, min_write_thresh;
37014-static int max_read_thresh = INPUT_POOL_WORDS * 32;
37015+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
37016 static int max_write_thresh = INPUT_POOL_WORDS * 32;
37017 static char sysctl_bootid[16];
37018
37019@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
37020 static int proc_do_uuid(ctl_table *table, int write,
37021 void __user *buffer, size_t *lenp, loff_t *ppos)
37022 {
37023- ctl_table fake_table;
37024+ ctl_table_no_const fake_table;
37025 unsigned char buf[64], tmp_uuid[16], *uuid;
37026
37027 uuid = table->data;
37028diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
37029index bf2349db..5456d53 100644
37030--- a/drivers/char/sonypi.c
37031+++ b/drivers/char/sonypi.c
37032@@ -54,6 +54,7 @@
37033
37034 #include <asm/uaccess.h>
37035 #include <asm/io.h>
37036+#include <asm/local.h>
37037
37038 #include <linux/sonypi.h>
37039
37040@@ -490,7 +491,7 @@ static struct sonypi_device {
37041 spinlock_t fifo_lock;
37042 wait_queue_head_t fifo_proc_list;
37043 struct fasync_struct *fifo_async;
37044- int open_count;
37045+ local_t open_count;
37046 int model;
37047 struct input_dev *input_jog_dev;
37048 struct input_dev *input_key_dev;
37049@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
37050 static int sonypi_misc_release(struct inode *inode, struct file *file)
37051 {
37052 mutex_lock(&sonypi_device.lock);
37053- sonypi_device.open_count--;
37054+ local_dec(&sonypi_device.open_count);
37055 mutex_unlock(&sonypi_device.lock);
37056 return 0;
37057 }
37058@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
37059 {
37060 mutex_lock(&sonypi_device.lock);
37061 /* Flush input queue on first open */
37062- if (!sonypi_device.open_count)
37063+ if (!local_read(&sonypi_device.open_count))
37064 kfifo_reset(&sonypi_device.fifo);
37065- sonypi_device.open_count++;
37066+ local_inc(&sonypi_device.open_count);
37067 mutex_unlock(&sonypi_device.lock);
37068
37069 return 0;
37070diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
37071index 64420b3..5c40b56 100644
37072--- a/drivers/char/tpm/tpm_acpi.c
37073+++ b/drivers/char/tpm/tpm_acpi.c
37074@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
37075 virt = acpi_os_map_memory(start, len);
37076 if (!virt) {
37077 kfree(log->bios_event_log);
37078+ log->bios_event_log = NULL;
37079 printk("%s: ERROR - Unable to map memory\n", __func__);
37080 return -EIO;
37081 }
37082
37083- memcpy_fromio(log->bios_event_log, virt, len);
37084+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
37085
37086 acpi_os_unmap_memory(virt, len);
37087 return 0;
37088diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
37089index 84ddc55..1d32f1e 100644
37090--- a/drivers/char/tpm/tpm_eventlog.c
37091+++ b/drivers/char/tpm/tpm_eventlog.c
37092@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
37093 event = addr;
37094
37095 if ((event->event_type == 0 && event->event_size == 0) ||
37096- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
37097+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
37098 return NULL;
37099
37100 return addr;
37101@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
37102 return NULL;
37103
37104 if ((event->event_type == 0 && event->event_size == 0) ||
37105- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
37106+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
37107 return NULL;
37108
37109 (*pos)++;
37110@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
37111 int i;
37112
37113 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
37114- seq_putc(m, data[i]);
37115+ if (!seq_putc(m, data[i]))
37116+ return -EFAULT;
37117
37118 return 0;
37119 }
37120diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
37121index fc45567..fa2a590 100644
37122--- a/drivers/char/virtio_console.c
37123+++ b/drivers/char/virtio_console.c
37124@@ -682,7 +682,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
37125 if (to_user) {
37126 ssize_t ret;
37127
37128- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
37129+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
37130 if (ret)
37131 return -EFAULT;
37132 } else {
37133@@ -785,7 +785,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
37134 if (!port_has_data(port) && !port->host_connected)
37135 return 0;
37136
37137- return fill_readbuf(port, ubuf, count, true);
37138+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
37139 }
37140
37141 static int wait_port_writable(struct port *port, bool nonblock)
37142diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
37143index a33f46f..a720eed 100644
37144--- a/drivers/clk/clk-composite.c
37145+++ b/drivers/clk/clk-composite.c
37146@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
37147 struct clk *clk;
37148 struct clk_init_data init;
37149 struct clk_composite *composite;
37150- struct clk_ops *clk_composite_ops;
37151+ clk_ops_no_const *clk_composite_ops;
37152
37153 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
37154 if (!composite) {
37155diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
37156index bd11315..7f87098 100644
37157--- a/drivers/clk/socfpga/clk.c
37158+++ b/drivers/clk/socfpga/clk.c
37159@@ -22,6 +22,7 @@
37160 #include <linux/clk-provider.h>
37161 #include <linux/io.h>
37162 #include <linux/of.h>
37163+#include <asm/pgtable.h>
37164
37165 /* Clock Manager offsets */
37166 #define CLKMGR_CTRL 0x0
37167@@ -135,8 +136,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
37168 if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
37169 strcmp(clk_name, "sdram_pll")) {
37170 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
37171- clk_pll_ops.enable = clk_gate_ops.enable;
37172- clk_pll_ops.disable = clk_gate_ops.disable;
37173+ pax_open_kernel();
37174+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
37175+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
37176+ pax_close_kernel();
37177 }
37178
37179 clk = clk_register(NULL, &socfpga_clk->hw.hw);
37180diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
37181index a2b2541..bc1e7ff 100644
37182--- a/drivers/clocksource/arm_arch_timer.c
37183+++ b/drivers/clocksource/arm_arch_timer.c
37184@@ -264,7 +264,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
37185 return NOTIFY_OK;
37186 }
37187
37188-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
37189+static struct notifier_block arch_timer_cpu_nb = {
37190 .notifier_call = arch_timer_cpu_notify,
37191 };
37192
37193diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
37194index 350f493..489479e 100644
37195--- a/drivers/clocksource/bcm_kona_timer.c
37196+++ b/drivers/clocksource/bcm_kona_timer.c
37197@@ -199,7 +199,7 @@ static struct irqaction kona_timer_irq = {
37198 .handler = kona_timer_interrupt,
37199 };
37200
37201-static void __init kona_timer_init(void)
37202+static void __init kona_timer_init(struct device_node *np)
37203 {
37204 kona_timers_init();
37205 kona_timer_clockevents_init();
37206diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
37207index ade7513..069445f 100644
37208--- a/drivers/clocksource/metag_generic.c
37209+++ b/drivers/clocksource/metag_generic.c
37210@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
37211 return NOTIFY_OK;
37212 }
37213
37214-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
37215+static struct notifier_block arch_timer_cpu_nb = {
37216 .notifier_call = arch_timer_cpu_notify,
37217 };
37218
37219diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
37220index edc089e..bc7c0bc 100644
37221--- a/drivers/cpufreq/acpi-cpufreq.c
37222+++ b/drivers/cpufreq/acpi-cpufreq.c
37223@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
37224 return sprintf(buf, "%u\n", boost_enabled);
37225 }
37226
37227-static struct global_attr global_boost = __ATTR(boost, 0644,
37228+static global_attr_no_const global_boost = __ATTR(boost, 0644,
37229 show_global_boost,
37230 store_global_boost);
37231
37232@@ -705,8 +705,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37233 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
37234 per_cpu(acfreq_data, cpu) = data;
37235
37236- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
37237- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37238+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
37239+ pax_open_kernel();
37240+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37241+ pax_close_kernel();
37242+ }
37243
37244 result = acpi_processor_register_performance(data->acpi_data, cpu);
37245 if (result)
37246@@ -832,7 +835,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37247 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
37248 break;
37249 case ACPI_ADR_SPACE_FIXED_HARDWARE:
37250- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37251+ pax_open_kernel();
37252+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37253+ pax_close_kernel();
37254 policy->cur = get_cur_freq_on_cpu(cpu);
37255 break;
37256 default:
37257@@ -843,8 +848,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37258 acpi_processor_notify_smm(THIS_MODULE);
37259
37260 /* Check for APERF/MPERF support in hardware */
37261- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
37262- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
37263+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
37264+ pax_open_kernel();
37265+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
37266+ pax_close_kernel();
37267+ }
37268
37269 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
37270 for (i = 0; i < perf->state_count; i++)
37271diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
37272index 6485547..477033e 100644
37273--- a/drivers/cpufreq/cpufreq.c
37274+++ b/drivers/cpufreq/cpufreq.c
37275@@ -1854,7 +1854,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
37276 return NOTIFY_OK;
37277 }
37278
37279-static struct notifier_block __refdata cpufreq_cpu_notifier = {
37280+static struct notifier_block cpufreq_cpu_notifier = {
37281 .notifier_call = cpufreq_cpu_callback,
37282 };
37283
37284@@ -1886,8 +1886,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
37285
37286 pr_debug("trying to register driver %s\n", driver_data->name);
37287
37288- if (driver_data->setpolicy)
37289- driver_data->flags |= CPUFREQ_CONST_LOOPS;
37290+ if (driver_data->setpolicy) {
37291+ pax_open_kernel();
37292+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
37293+ pax_close_kernel();
37294+ }
37295
37296 write_lock_irqsave(&cpufreq_driver_lock, flags);
37297 if (cpufreq_driver) {
37298diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
37299index a86ff72..aad2b03 100644
37300--- a/drivers/cpufreq/cpufreq_governor.c
37301+++ b/drivers/cpufreq/cpufreq_governor.c
37302@@ -235,7 +235,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37303 struct dbs_data *dbs_data;
37304 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
37305 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
37306- struct od_ops *od_ops = NULL;
37307+ const struct od_ops *od_ops = NULL;
37308 struct od_dbs_tuners *od_tuners = NULL;
37309 struct cs_dbs_tuners *cs_tuners = NULL;
37310 struct cpu_dbs_common_info *cpu_cdbs;
37311@@ -298,7 +298,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37312
37313 if ((cdata->governor == GOV_CONSERVATIVE) &&
37314 (!policy->governor->initialized)) {
37315- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37316+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37317
37318 cpufreq_register_notifier(cs_ops->notifier_block,
37319 CPUFREQ_TRANSITION_NOTIFIER);
37320@@ -315,7 +315,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37321
37322 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
37323 (policy->governor->initialized == 1)) {
37324- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37325+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37326
37327 cpufreq_unregister_notifier(cs_ops->notifier_block,
37328 CPUFREQ_TRANSITION_NOTIFIER);
37329diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
37330index 0d9e6be..461fd3b 100644
37331--- a/drivers/cpufreq/cpufreq_governor.h
37332+++ b/drivers/cpufreq/cpufreq_governor.h
37333@@ -204,7 +204,7 @@ struct common_dbs_data {
37334 void (*exit)(struct dbs_data *dbs_data);
37335
37336 /* Governor specific ops, see below */
37337- void *gov_ops;
37338+ const void *gov_ops;
37339 };
37340
37341 /* Governer Per policy data */
37342diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
37343index c087347..dad6268 100644
37344--- a/drivers/cpufreq/cpufreq_ondemand.c
37345+++ b/drivers/cpufreq/cpufreq_ondemand.c
37346@@ -615,14 +615,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
37347 (struct cpufreq_policy *, unsigned int, unsigned int),
37348 unsigned int powersave_bias)
37349 {
37350- od_ops.powersave_bias_target = f;
37351+ pax_open_kernel();
37352+ *(void **)&od_ops.powersave_bias_target = f;
37353+ pax_close_kernel();
37354 od_set_powersave_bias(powersave_bias);
37355 }
37356 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
37357
37358 void od_unregister_powersave_bias_handler(void)
37359 {
37360- od_ops.powersave_bias_target = generic_powersave_bias_target;
37361+ pax_open_kernel();
37362+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
37363+ pax_close_kernel();
37364 od_set_powersave_bias(0);
37365 }
37366 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
37367diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
37368index bfd6273..e39dd63 100644
37369--- a/drivers/cpufreq/cpufreq_stats.c
37370+++ b/drivers/cpufreq/cpufreq_stats.c
37371@@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
37372 }
37373
37374 /* priority=1 so this will get called before cpufreq_remove_dev */
37375-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
37376+static struct notifier_block cpufreq_stat_cpu_notifier = {
37377 .notifier_call = cpufreq_stat_cpu_callback,
37378 .priority = 1,
37379 };
37380diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
37381index 421ef37..e708530c 100644
37382--- a/drivers/cpufreq/p4-clockmod.c
37383+++ b/drivers/cpufreq/p4-clockmod.c
37384@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37385 case 0x0F: /* Core Duo */
37386 case 0x16: /* Celeron Core */
37387 case 0x1C: /* Atom */
37388- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37389+ pax_open_kernel();
37390+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37391+ pax_close_kernel();
37392 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
37393 case 0x0D: /* Pentium M (Dothan) */
37394- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37395+ pax_open_kernel();
37396+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37397+ pax_close_kernel();
37398 /* fall through */
37399 case 0x09: /* Pentium M (Banias) */
37400 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
37401@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37402
37403 /* on P-4s, the TSC runs with constant frequency independent whether
37404 * throttling is active or not. */
37405- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37406+ pax_open_kernel();
37407+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37408+ pax_close_kernel();
37409
37410 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
37411 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
37412diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
37413index c71ee14..7c2e183 100644
37414--- a/drivers/cpufreq/sparc-us3-cpufreq.c
37415+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
37416@@ -18,14 +18,12 @@
37417 #include <asm/head.h>
37418 #include <asm/timer.h>
37419
37420-static struct cpufreq_driver *cpufreq_us3_driver;
37421-
37422 struct us3_freq_percpu_info {
37423 struct cpufreq_frequency_table table[4];
37424 };
37425
37426 /* Indexed by cpu number. */
37427-static struct us3_freq_percpu_info *us3_freq_table;
37428+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
37429
37430 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
37431 * in the Safari config register.
37432@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
37433
37434 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
37435 {
37436- if (cpufreq_us3_driver)
37437- us3_set_cpu_divider_index(policy, 0);
37438+ us3_set_cpu_divider_index(policy->cpu, 0);
37439
37440 return 0;
37441 }
37442
37443+static int __init us3_freq_init(void);
37444+static void __exit us3_freq_exit(void);
37445+
37446+static struct cpufreq_driver cpufreq_us3_driver = {
37447+ .init = us3_freq_cpu_init,
37448+ .verify = us3_freq_verify,
37449+ .target = us3_freq_target,
37450+ .get = us3_freq_get,
37451+ .exit = us3_freq_cpu_exit,
37452+ .owner = THIS_MODULE,
37453+ .name = "UltraSPARC-III",
37454+
37455+};
37456+
37457 static int __init us3_freq_init(void)
37458 {
37459 unsigned long manuf, impl, ver;
37460@@ -208,57 +219,15 @@ static int __init us3_freq_init(void)
37461 (impl == CHEETAH_IMPL ||
37462 impl == CHEETAH_PLUS_IMPL ||
37463 impl == JAGUAR_IMPL ||
37464- impl == PANTHER_IMPL)) {
37465- struct cpufreq_driver *driver;
37466-
37467- ret = -ENOMEM;
37468- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
37469- if (!driver)
37470- goto err_out;
37471-
37472- us3_freq_table = kzalloc(
37473- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
37474- GFP_KERNEL);
37475- if (!us3_freq_table)
37476- goto err_out;
37477-
37478- driver->init = us3_freq_cpu_init;
37479- driver->verify = us3_freq_verify;
37480- driver->target = us3_freq_target;
37481- driver->get = us3_freq_get;
37482- driver->exit = us3_freq_cpu_exit;
37483- driver->owner = THIS_MODULE,
37484- strcpy(driver->name, "UltraSPARC-III");
37485-
37486- cpufreq_us3_driver = driver;
37487- ret = cpufreq_register_driver(driver);
37488- if (ret)
37489- goto err_out;
37490-
37491- return 0;
37492-
37493-err_out:
37494- if (driver) {
37495- kfree(driver);
37496- cpufreq_us3_driver = NULL;
37497- }
37498- kfree(us3_freq_table);
37499- us3_freq_table = NULL;
37500- return ret;
37501- }
37502+ impl == PANTHER_IMPL))
37503+ return cpufreq_register_driver(&cpufreq_us3_driver);
37504
37505 return -ENODEV;
37506 }
37507
37508 static void __exit us3_freq_exit(void)
37509 {
37510- if (cpufreq_us3_driver) {
37511- cpufreq_unregister_driver(cpufreq_us3_driver);
37512- kfree(cpufreq_us3_driver);
37513- cpufreq_us3_driver = NULL;
37514- kfree(us3_freq_table);
37515- us3_freq_table = NULL;
37516- }
37517+ cpufreq_unregister_driver(&cpufreq_us3_driver);
37518 }
37519
37520 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
37521diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
37522index 618e6f4..e89d915 100644
37523--- a/drivers/cpufreq/speedstep-centrino.c
37524+++ b/drivers/cpufreq/speedstep-centrino.c
37525@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
37526 !cpu_has(cpu, X86_FEATURE_EST))
37527 return -ENODEV;
37528
37529- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
37530- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
37531+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
37532+ pax_open_kernel();
37533+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
37534+ pax_close_kernel();
37535+ }
37536
37537 if (policy->cpu != 0)
37538 return -ENODEV;
37539diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
37540index c3a93fe..e808f24 100644
37541--- a/drivers/cpuidle/cpuidle.c
37542+++ b/drivers/cpuidle/cpuidle.c
37543@@ -254,7 +254,7 @@ static int poll_idle(struct cpuidle_device *dev,
37544
37545 static void poll_idle_init(struct cpuidle_driver *drv)
37546 {
37547- struct cpuidle_state *state = &drv->states[0];
37548+ cpuidle_state_no_const *state = &drv->states[0];
37549
37550 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
37551 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
37552diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
37553index ea2f8e7..70ac501 100644
37554--- a/drivers/cpuidle/governor.c
37555+++ b/drivers/cpuidle/governor.c
37556@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
37557 mutex_lock(&cpuidle_lock);
37558 if (__cpuidle_find_governor(gov->name) == NULL) {
37559 ret = 0;
37560- list_add_tail(&gov->governor_list, &cpuidle_governors);
37561+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
37562 if (!cpuidle_curr_governor ||
37563 cpuidle_curr_governor->rating < gov->rating)
37564 cpuidle_switch_governor(gov);
37565@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
37566 new_gov = cpuidle_replace_governor(gov->rating);
37567 cpuidle_switch_governor(new_gov);
37568 }
37569- list_del(&gov->governor_list);
37570+ pax_list_del((struct list_head *)&gov->governor_list);
37571 mutex_unlock(&cpuidle_lock);
37572 }
37573
37574diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
37575index 428754a..8bdf9cc 100644
37576--- a/drivers/cpuidle/sysfs.c
37577+++ b/drivers/cpuidle/sysfs.c
37578@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
37579 NULL
37580 };
37581
37582-static struct attribute_group cpuidle_attr_group = {
37583+static attribute_group_no_const cpuidle_attr_group = {
37584 .attrs = cpuidle_default_attrs,
37585 .name = "cpuidle",
37586 };
37587diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
37588index 3b36797..db0b0c0 100644
37589--- a/drivers/devfreq/devfreq.c
37590+++ b/drivers/devfreq/devfreq.c
37591@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
37592 GFP_KERNEL);
37593 devfreq->last_stat_updated = jiffies;
37594
37595- dev_set_name(&devfreq->dev, dev_name(dev));
37596+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
37597 err = device_register(&devfreq->dev);
37598 if (err) {
37599 put_device(&devfreq->dev);
37600@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
37601 goto err_out;
37602 }
37603
37604- list_add(&governor->node, &devfreq_governor_list);
37605+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
37606
37607 list_for_each_entry(devfreq, &devfreq_list, node) {
37608 int ret = 0;
37609@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
37610 }
37611 }
37612
37613- list_del(&governor->node);
37614+ pax_list_del((struct list_head *)&governor->node);
37615 err_out:
37616 mutex_unlock(&devfreq_list_lock);
37617
37618diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
37619index b70709b..1d8d02a 100644
37620--- a/drivers/dma/sh/shdma.c
37621+++ b/drivers/dma/sh/shdma.c
37622@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
37623 return ret;
37624 }
37625
37626-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
37627+static struct notifier_block sh_dmae_nmi_notifier = {
37628 .notifier_call = sh_dmae_nmi_handler,
37629
37630 /* Run before NMI debug handler and KGDB */
37631diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
37632index c4d700a..0b57abd 100644
37633--- a/drivers/edac/edac_mc_sysfs.c
37634+++ b/drivers/edac/edac_mc_sysfs.c
37635@@ -148,7 +148,7 @@ static const char * const edac_caps[] = {
37636 struct dev_ch_attribute {
37637 struct device_attribute attr;
37638 int channel;
37639-};
37640+} __do_const;
37641
37642 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
37643 struct dev_ch_attribute dev_attr_legacy_##_name = \
37644@@ -1005,14 +1005,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
37645 }
37646
37647 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
37648+ pax_open_kernel();
37649 if (mci->get_sdram_scrub_rate) {
37650- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
37651- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
37652+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
37653+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
37654 }
37655 if (mci->set_sdram_scrub_rate) {
37656- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
37657- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
37658+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
37659+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
37660 }
37661+ pax_close_kernel();
37662 err = device_create_file(&mci->dev,
37663 &dev_attr_sdram_scrub_rate);
37664 if (err) {
37665diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
37666index e8658e4..22746d6 100644
37667--- a/drivers/edac/edac_pci_sysfs.c
37668+++ b/drivers/edac/edac_pci_sysfs.c
37669@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
37670 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
37671 static int edac_pci_poll_msec = 1000; /* one second workq period */
37672
37673-static atomic_t pci_parity_count = ATOMIC_INIT(0);
37674-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
37675+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
37676+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
37677
37678 static struct kobject *edac_pci_top_main_kobj;
37679 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
37680@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
37681 void *value;
37682 ssize_t(*show) (void *, char *);
37683 ssize_t(*store) (void *, const char *, size_t);
37684-};
37685+} __do_const;
37686
37687 /* Set of show/store abstract level functions for PCI Parity object */
37688 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
37689@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37690 edac_printk(KERN_CRIT, EDAC_PCI,
37691 "Signaled System Error on %s\n",
37692 pci_name(dev));
37693- atomic_inc(&pci_nonparity_count);
37694+ atomic_inc_unchecked(&pci_nonparity_count);
37695 }
37696
37697 if (status & (PCI_STATUS_PARITY)) {
37698@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37699 "Master Data Parity Error on %s\n",
37700 pci_name(dev));
37701
37702- atomic_inc(&pci_parity_count);
37703+ atomic_inc_unchecked(&pci_parity_count);
37704 }
37705
37706 if (status & (PCI_STATUS_DETECTED_PARITY)) {
37707@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37708 "Detected Parity Error on %s\n",
37709 pci_name(dev));
37710
37711- atomic_inc(&pci_parity_count);
37712+ atomic_inc_unchecked(&pci_parity_count);
37713 }
37714 }
37715
37716@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37717 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
37718 "Signaled System Error on %s\n",
37719 pci_name(dev));
37720- atomic_inc(&pci_nonparity_count);
37721+ atomic_inc_unchecked(&pci_nonparity_count);
37722 }
37723
37724 if (status & (PCI_STATUS_PARITY)) {
37725@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37726 "Master Data Parity Error on "
37727 "%s\n", pci_name(dev));
37728
37729- atomic_inc(&pci_parity_count);
37730+ atomic_inc_unchecked(&pci_parity_count);
37731 }
37732
37733 if (status & (PCI_STATUS_DETECTED_PARITY)) {
37734@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37735 "Detected Parity Error on %s\n",
37736 pci_name(dev));
37737
37738- atomic_inc(&pci_parity_count);
37739+ atomic_inc_unchecked(&pci_parity_count);
37740 }
37741 }
37742 }
37743@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
37744 if (!check_pci_errors)
37745 return;
37746
37747- before_count = atomic_read(&pci_parity_count);
37748+ before_count = atomic_read_unchecked(&pci_parity_count);
37749
37750 /* scan all PCI devices looking for a Parity Error on devices and
37751 * bridges.
37752@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
37753 /* Only if operator has selected panic on PCI Error */
37754 if (edac_pci_get_panic_on_pe()) {
37755 /* If the count is different 'after' from 'before' */
37756- if (before_count != atomic_read(&pci_parity_count))
37757+ if (before_count != atomic_read_unchecked(&pci_parity_count))
37758 panic("EDAC: PCI Parity Error");
37759 }
37760 }
37761diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
37762index 51b7e3a..aa8a3e8 100644
37763--- a/drivers/edac/mce_amd.h
37764+++ b/drivers/edac/mce_amd.h
37765@@ -77,7 +77,7 @@ struct amd_decoder_ops {
37766 bool (*mc0_mce)(u16, u8);
37767 bool (*mc1_mce)(u16, u8);
37768 bool (*mc2_mce)(u16, u8);
37769-};
37770+} __no_const;
37771
37772 void amd_report_gart_errors(bool);
37773 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
37774diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
37775index 57ea7f4..789e3c3 100644
37776--- a/drivers/firewire/core-card.c
37777+++ b/drivers/firewire/core-card.c
37778@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
37779
37780 void fw_core_remove_card(struct fw_card *card)
37781 {
37782- struct fw_card_driver dummy_driver = dummy_driver_template;
37783+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
37784
37785 card->driver->update_phy_reg(card, 4,
37786 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
37787diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
37788index 664a6ff..af13580 100644
37789--- a/drivers/firewire/core-device.c
37790+++ b/drivers/firewire/core-device.c
37791@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
37792 struct config_rom_attribute {
37793 struct device_attribute attr;
37794 u32 key;
37795-};
37796+} __do_const;
37797
37798 static ssize_t show_immediate(struct device *dev,
37799 struct device_attribute *dattr, char *buf)
37800diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
37801index 28a94c7..58da63a 100644
37802--- a/drivers/firewire/core-transaction.c
37803+++ b/drivers/firewire/core-transaction.c
37804@@ -38,6 +38,7 @@
37805 #include <linux/timer.h>
37806 #include <linux/types.h>
37807 #include <linux/workqueue.h>
37808+#include <linux/sched.h>
37809
37810 #include <asm/byteorder.h>
37811
37812diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
37813index 515a42c..5ecf3ba 100644
37814--- a/drivers/firewire/core.h
37815+++ b/drivers/firewire/core.h
37816@@ -111,6 +111,7 @@ struct fw_card_driver {
37817
37818 int (*stop_iso)(struct fw_iso_context *ctx);
37819 };
37820+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
37821
37822 void fw_card_initialize(struct fw_card *card,
37823 const struct fw_card_driver *driver, struct device *device);
37824diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
37825index 94a58a0..f5eba42 100644
37826--- a/drivers/firmware/dmi-id.c
37827+++ b/drivers/firmware/dmi-id.c
37828@@ -16,7 +16,7 @@
37829 struct dmi_device_attribute{
37830 struct device_attribute dev_attr;
37831 int field;
37832-};
37833+} __do_const;
37834 #define to_dmi_dev_attr(_dev_attr) \
37835 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
37836
37837diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
37838index b95159b..841ae55 100644
37839--- a/drivers/firmware/dmi_scan.c
37840+++ b/drivers/firmware/dmi_scan.c
37841@@ -497,11 +497,6 @@ void __init dmi_scan_machine(void)
37842 }
37843 }
37844 else {
37845- /*
37846- * no iounmap() for that ioremap(); it would be a no-op, but
37847- * it's so early in setup that sucker gets confused into doing
37848- * what it shouldn't if we actually call it.
37849- */
37850 p = dmi_ioremap(0xF0000, 0x10000);
37851 if (p == NULL)
37852 goto error;
37853@@ -786,7 +781,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
37854 if (buf == NULL)
37855 return -1;
37856
37857- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
37858+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
37859
37860 iounmap(buf);
37861 return 0;
37862diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
37863index 5145fa3..0d3babd 100644
37864--- a/drivers/firmware/efi/efi.c
37865+++ b/drivers/firmware/efi/efi.c
37866@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
37867 };
37868
37869 static struct efivars generic_efivars;
37870-static struct efivar_operations generic_ops;
37871+static efivar_operations_no_const generic_ops __read_only;
37872
37873 static int generic_ops_register(void)
37874 {
37875- generic_ops.get_variable = efi.get_variable;
37876- generic_ops.set_variable = efi.set_variable;
37877- generic_ops.get_next_variable = efi.get_next_variable;
37878- generic_ops.query_variable_store = efi_query_variable_store;
37879+ pax_open_kernel();
37880+ *(void **)&generic_ops.get_variable = efi.get_variable;
37881+ *(void **)&generic_ops.set_variable = efi.set_variable;
37882+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
37883+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
37884+ pax_close_kernel();
37885
37886 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
37887 }
37888diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
37889index 8bd1bb6..c48b0c6 100644
37890--- a/drivers/firmware/efi/efivars.c
37891+++ b/drivers/firmware/efi/efivars.c
37892@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
37893 static int
37894 create_efivars_bin_attributes(void)
37895 {
37896- struct bin_attribute *attr;
37897+ bin_attribute_no_const *attr;
37898 int error;
37899
37900 /* new_var */
37901diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
37902index 2a90ba6..07f3733 100644
37903--- a/drivers/firmware/google/memconsole.c
37904+++ b/drivers/firmware/google/memconsole.c
37905@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
37906 if (!found_memconsole())
37907 return -ENODEV;
37908
37909- memconsole_bin_attr.size = memconsole_length;
37910+ pax_open_kernel();
37911+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
37912+ pax_close_kernel();
37913
37914 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
37915
37916diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
37917index e16d932..f0206ef 100644
37918--- a/drivers/gpio/gpio-ich.c
37919+++ b/drivers/gpio/gpio-ich.c
37920@@ -69,7 +69,7 @@ struct ichx_desc {
37921 /* Some chipsets have quirks, let these use their own request/get */
37922 int (*request)(struct gpio_chip *chip, unsigned offset);
37923 int (*get)(struct gpio_chip *chip, unsigned offset);
37924-};
37925+} __do_const;
37926
37927 static struct {
37928 spinlock_t lock;
37929diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
37930index 9902732..64b62dd 100644
37931--- a/drivers/gpio/gpio-vr41xx.c
37932+++ b/drivers/gpio/gpio-vr41xx.c
37933@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
37934 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
37935 maskl, pendl, maskh, pendh);
37936
37937- atomic_inc(&irq_err_count);
37938+ atomic_inc_unchecked(&irq_err_count);
37939
37940 return -EINVAL;
37941 }
37942diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
37943index ed1334e..ee0dd42 100644
37944--- a/drivers/gpu/drm/drm_crtc_helper.c
37945+++ b/drivers/gpu/drm/drm_crtc_helper.c
37946@@ -321,7 +321,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
37947 struct drm_crtc *tmp;
37948 int crtc_mask = 1;
37949
37950- WARN(!crtc, "checking null crtc?\n");
37951+ BUG_ON(!crtc);
37952
37953 dev = crtc->dev;
37954
37955diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
37956index 9cc247f..36aa285 100644
37957--- a/drivers/gpu/drm/drm_drv.c
37958+++ b/drivers/gpu/drm/drm_drv.c
37959@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
37960 /**
37961 * Copy and IOCTL return string to user space
37962 */
37963-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
37964+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
37965 {
37966 int len;
37967
37968@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
37969 struct drm_file *file_priv = filp->private_data;
37970 struct drm_device *dev;
37971 const struct drm_ioctl_desc *ioctl = NULL;
37972- drm_ioctl_t *func;
37973+ drm_ioctl_no_const_t func;
37974 unsigned int nr = DRM_IOCTL_NR(cmd);
37975 int retcode = -EINVAL;
37976 char stack_kdata[128];
37977@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
37978 return -ENODEV;
37979
37980 atomic_inc(&dev->ioctl_count);
37981- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
37982+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
37983 ++file_priv->ioctl_count;
37984
37985 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
37986diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
37987index 429e07d..e681a2c 100644
37988--- a/drivers/gpu/drm/drm_fops.c
37989+++ b/drivers/gpu/drm/drm_fops.c
37990@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
37991 }
37992
37993 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
37994- atomic_set(&dev->counts[i], 0);
37995+ atomic_set_unchecked(&dev->counts[i], 0);
37996
37997 dev->sigdata.lock = NULL;
37998
37999@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
38000 if (drm_device_is_unplugged(dev))
38001 return -ENODEV;
38002
38003- if (!dev->open_count++)
38004+ if (local_inc_return(&dev->open_count) == 1)
38005 need_setup = 1;
38006 mutex_lock(&dev->struct_mutex);
38007 old_imapping = inode->i_mapping;
38008@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
38009 retcode = drm_open_helper(inode, filp, dev);
38010 if (retcode)
38011 goto err_undo;
38012- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
38013+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
38014 if (need_setup) {
38015 retcode = drm_setup(dev);
38016 if (retcode)
38017@@ -166,7 +166,7 @@ err_undo:
38018 iput(container_of(dev->dev_mapping, struct inode, i_data));
38019 dev->dev_mapping = old_mapping;
38020 mutex_unlock(&dev->struct_mutex);
38021- dev->open_count--;
38022+ local_dec(&dev->open_count);
38023 return retcode;
38024 }
38025 EXPORT_SYMBOL(drm_open);
38026@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
38027
38028 mutex_lock(&drm_global_mutex);
38029
38030- DRM_DEBUG("open_count = %d\n", dev->open_count);
38031+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
38032
38033 if (dev->driver->preclose)
38034 dev->driver->preclose(dev, file_priv);
38035@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
38036 * Begin inline drm_release
38037 */
38038
38039- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
38040+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
38041 task_pid_nr(current),
38042 (long)old_encode_dev(file_priv->minor->device),
38043- dev->open_count);
38044+ local_read(&dev->open_count));
38045
38046 /* Release any auth tokens that might point to this file_priv,
38047 (do that under the drm_global_mutex) */
38048@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
38049 * End inline drm_release
38050 */
38051
38052- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
38053- if (!--dev->open_count) {
38054+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
38055+ if (local_dec_and_test(&dev->open_count)) {
38056 if (atomic_read(&dev->ioctl_count)) {
38057 DRM_ERROR("Device busy: %d\n",
38058 atomic_read(&dev->ioctl_count));
38059diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
38060index f731116..629842c 100644
38061--- a/drivers/gpu/drm/drm_global.c
38062+++ b/drivers/gpu/drm/drm_global.c
38063@@ -36,7 +36,7 @@
38064 struct drm_global_item {
38065 struct mutex mutex;
38066 void *object;
38067- int refcount;
38068+ atomic_t refcount;
38069 };
38070
38071 static struct drm_global_item glob[DRM_GLOBAL_NUM];
38072@@ -49,7 +49,7 @@ void drm_global_init(void)
38073 struct drm_global_item *item = &glob[i];
38074 mutex_init(&item->mutex);
38075 item->object = NULL;
38076- item->refcount = 0;
38077+ atomic_set(&item->refcount, 0);
38078 }
38079 }
38080
38081@@ -59,7 +59,7 @@ void drm_global_release(void)
38082 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
38083 struct drm_global_item *item = &glob[i];
38084 BUG_ON(item->object != NULL);
38085- BUG_ON(item->refcount != 0);
38086+ BUG_ON(atomic_read(&item->refcount) != 0);
38087 }
38088 }
38089
38090@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38091 void *object;
38092
38093 mutex_lock(&item->mutex);
38094- if (item->refcount == 0) {
38095+ if (atomic_read(&item->refcount) == 0) {
38096 item->object = kzalloc(ref->size, GFP_KERNEL);
38097 if (unlikely(item->object == NULL)) {
38098 ret = -ENOMEM;
38099@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38100 goto out_err;
38101
38102 }
38103- ++item->refcount;
38104+ atomic_inc(&item->refcount);
38105 ref->object = item->object;
38106 object = item->object;
38107 mutex_unlock(&item->mutex);
38108@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
38109 struct drm_global_item *item = &glob[ref->global_type];
38110
38111 mutex_lock(&item->mutex);
38112- BUG_ON(item->refcount == 0);
38113+ BUG_ON(atomic_read(&item->refcount) == 0);
38114 BUG_ON(ref->object != item->object);
38115- if (--item->refcount == 0) {
38116+ if (atomic_dec_and_test(&item->refcount)) {
38117 ref->release(ref);
38118 item->object = NULL;
38119 }
38120diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
38121index d4b20ce..77a8d41 100644
38122--- a/drivers/gpu/drm/drm_info.c
38123+++ b/drivers/gpu/drm/drm_info.c
38124@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
38125 struct drm_local_map *map;
38126 struct drm_map_list *r_list;
38127
38128- /* Hardcoded from _DRM_FRAME_BUFFER,
38129- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
38130- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
38131- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
38132+ static const char * const types[] = {
38133+ [_DRM_FRAME_BUFFER] = "FB",
38134+ [_DRM_REGISTERS] = "REG",
38135+ [_DRM_SHM] = "SHM",
38136+ [_DRM_AGP] = "AGP",
38137+ [_DRM_SCATTER_GATHER] = "SG",
38138+ [_DRM_CONSISTENT] = "PCI",
38139+ [_DRM_GEM] = "GEM" };
38140 const char *type;
38141 int i;
38142
38143@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
38144 map = r_list->map;
38145 if (!map)
38146 continue;
38147- if (map->type < 0 || map->type > 5)
38148+ if (map->type >= ARRAY_SIZE(types))
38149 type = "??";
38150 else
38151 type = types[map->type];
38152@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
38153 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
38154 vma->vm_flags & VM_LOCKED ? 'l' : '-',
38155 vma->vm_flags & VM_IO ? 'i' : '-',
38156+#ifdef CONFIG_GRKERNSEC_HIDESYM
38157+ 0);
38158+#else
38159 vma->vm_pgoff);
38160+#endif
38161
38162 #if defined(__i386__)
38163 pgprot = pgprot_val(vma->vm_page_prot);
38164diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
38165index 2f4c434..dd12cd2 100644
38166--- a/drivers/gpu/drm/drm_ioc32.c
38167+++ b/drivers/gpu/drm/drm_ioc32.c
38168@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
38169 request = compat_alloc_user_space(nbytes);
38170 if (!access_ok(VERIFY_WRITE, request, nbytes))
38171 return -EFAULT;
38172- list = (struct drm_buf_desc *) (request + 1);
38173+ list = (struct drm_buf_desc __user *) (request + 1);
38174
38175 if (__put_user(count, &request->count)
38176 || __put_user(list, &request->list))
38177@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
38178 request = compat_alloc_user_space(nbytes);
38179 if (!access_ok(VERIFY_WRITE, request, nbytes))
38180 return -EFAULT;
38181- list = (struct drm_buf_pub *) (request + 1);
38182+ list = (struct drm_buf_pub __user *) (request + 1);
38183
38184 if (__put_user(count, &request->count)
38185 || __put_user(list, &request->list))
38186@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
38187 return 0;
38188 }
38189
38190-drm_ioctl_compat_t *drm_compat_ioctls[] = {
38191+drm_ioctl_compat_t drm_compat_ioctls[] = {
38192 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
38193 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
38194 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
38195@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
38196 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38197 {
38198 unsigned int nr = DRM_IOCTL_NR(cmd);
38199- drm_ioctl_compat_t *fn;
38200 int ret;
38201
38202 /* Assume that ioctls without an explicit compat routine will just
38203@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38204 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
38205 return drm_ioctl(filp, cmd, arg);
38206
38207- fn = drm_compat_ioctls[nr];
38208-
38209- if (fn != NULL)
38210- ret = (*fn) (filp, cmd, arg);
38211+ if (drm_compat_ioctls[nr] != NULL)
38212+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
38213 else
38214 ret = drm_ioctl(filp, cmd, arg);
38215
38216diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
38217index e77bd8b..1571b85 100644
38218--- a/drivers/gpu/drm/drm_ioctl.c
38219+++ b/drivers/gpu/drm/drm_ioctl.c
38220@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
38221 stats->data[i].value =
38222 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
38223 else
38224- stats->data[i].value = atomic_read(&dev->counts[i]);
38225+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
38226 stats->data[i].type = dev->types[i];
38227 }
38228
38229diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
38230index d752c96..fe08455 100644
38231--- a/drivers/gpu/drm/drm_lock.c
38232+++ b/drivers/gpu/drm/drm_lock.c
38233@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38234 if (drm_lock_take(&master->lock, lock->context)) {
38235 master->lock.file_priv = file_priv;
38236 master->lock.lock_time = jiffies;
38237- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
38238+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
38239 break; /* Got lock */
38240 }
38241
38242@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38243 return -EINVAL;
38244 }
38245
38246- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
38247+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
38248
38249 if (drm_lock_free(&master->lock, lock->context)) {
38250 /* FIXME: Should really bail out here. */
38251diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
38252index 16f3ec5..b28f9ca 100644
38253--- a/drivers/gpu/drm/drm_stub.c
38254+++ b/drivers/gpu/drm/drm_stub.c
38255@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
38256
38257 drm_device_set_unplugged(dev);
38258
38259- if (dev->open_count == 0) {
38260+ if (local_read(&dev->open_count) == 0) {
38261 drm_put_dev(dev);
38262 }
38263 mutex_unlock(&drm_global_mutex);
38264diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
38265index 0229665..f61329c 100644
38266--- a/drivers/gpu/drm/drm_sysfs.c
38267+++ b/drivers/gpu/drm/drm_sysfs.c
38268@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
38269 int drm_sysfs_device_add(struct drm_minor *minor)
38270 {
38271 int err;
38272- char *minor_str;
38273+ const char *minor_str;
38274
38275 minor->kdev.parent = minor->dev->dev;
38276
38277diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
38278index 004ecdf..db1f6e0 100644
38279--- a/drivers/gpu/drm/i810/i810_dma.c
38280+++ b/drivers/gpu/drm/i810/i810_dma.c
38281@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
38282 dma->buflist[vertex->idx],
38283 vertex->discard, vertex->used);
38284
38285- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38286- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38287+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38288+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38289 sarea_priv->last_enqueue = dev_priv->counter - 1;
38290 sarea_priv->last_dispatch = (int)hw_status[5];
38291
38292@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
38293 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
38294 mc->last_render);
38295
38296- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38297- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38298+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38299+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38300 sarea_priv->last_enqueue = dev_priv->counter - 1;
38301 sarea_priv->last_dispatch = (int)hw_status[5];
38302
38303diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
38304index 6e0acad..93c8289 100644
38305--- a/drivers/gpu/drm/i810/i810_drv.h
38306+++ b/drivers/gpu/drm/i810/i810_drv.h
38307@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
38308 int page_flipping;
38309
38310 wait_queue_head_t irq_queue;
38311- atomic_t irq_received;
38312- atomic_t irq_emitted;
38313+ atomic_unchecked_t irq_received;
38314+ atomic_unchecked_t irq_emitted;
38315
38316 int front_offset;
38317 } drm_i810_private_t;
38318diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
38319index e913d32..4d9b351 100644
38320--- a/drivers/gpu/drm/i915/i915_debugfs.c
38321+++ b/drivers/gpu/drm/i915/i915_debugfs.c
38322@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
38323 I915_READ(GTIMR));
38324 }
38325 seq_printf(m, "Interrupts received: %d\n",
38326- atomic_read(&dev_priv->irq_received));
38327+ atomic_read_unchecked(&dev_priv->irq_received));
38328 for_each_ring(ring, dev_priv, i) {
38329 if (IS_GEN6(dev) || IS_GEN7(dev)) {
38330 seq_printf(m,
38331diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
38332index 17d9b0b..860e6d9 100644
38333--- a/drivers/gpu/drm/i915/i915_dma.c
38334+++ b/drivers/gpu/drm/i915/i915_dma.c
38335@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
38336 bool can_switch;
38337
38338 spin_lock(&dev->count_lock);
38339- can_switch = (dev->open_count == 0);
38340+ can_switch = (local_read(&dev->open_count) == 0);
38341 spin_unlock(&dev->count_lock);
38342 return can_switch;
38343 }
38344diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
38345index 47d8b68..52f5d8d 100644
38346--- a/drivers/gpu/drm/i915/i915_drv.h
38347+++ b/drivers/gpu/drm/i915/i915_drv.h
38348@@ -916,7 +916,7 @@ typedef struct drm_i915_private {
38349 drm_dma_handle_t *status_page_dmah;
38350 struct resource mch_res;
38351
38352- atomic_t irq_received;
38353+ atomic_unchecked_t irq_received;
38354
38355 /* protects the irq masks */
38356 spinlock_t irq_lock;
38357@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
38358 struct drm_i915_private *dev_priv, unsigned port);
38359 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
38360 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
38361-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
38362+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
38363 {
38364 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
38365 }
38366diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38367index 117ce38..eefd237 100644
38368--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38369+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38370@@ -727,9 +727,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
38371
38372 static int
38373 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
38374- int count)
38375+ unsigned int count)
38376 {
38377- int i;
38378+ unsigned int i;
38379 int relocs_total = 0;
38380 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
38381
38382diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
38383index 3c59584..500f2e9 100644
38384--- a/drivers/gpu/drm/i915/i915_ioc32.c
38385+++ b/drivers/gpu/drm/i915/i915_ioc32.c
38386@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
38387 (unsigned long)request);
38388 }
38389
38390-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
38391+static drm_ioctl_compat_t i915_compat_ioctls[] = {
38392 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
38393 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
38394 [DRM_I915_GETPARAM] = compat_i915_getparam,
38395@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
38396 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38397 {
38398 unsigned int nr = DRM_IOCTL_NR(cmd);
38399- drm_ioctl_compat_t *fn = NULL;
38400 int ret;
38401
38402 if (nr < DRM_COMMAND_BASE)
38403 return drm_compat_ioctl(filp, cmd, arg);
38404
38405- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
38406- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
38407-
38408- if (fn != NULL)
38409+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
38410+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
38411 ret = (*fn) (filp, cmd, arg);
38412- else
38413+ } else
38414 ret = drm_ioctl(filp, cmd, arg);
38415
38416 return ret;
38417diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
38418index e5e32869..1678f36 100644
38419--- a/drivers/gpu/drm/i915/i915_irq.c
38420+++ b/drivers/gpu/drm/i915/i915_irq.c
38421@@ -670,7 +670,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
38422 int pipe;
38423 u32 pipe_stats[I915_MAX_PIPES];
38424
38425- atomic_inc(&dev_priv->irq_received);
38426+ atomic_inc_unchecked(&dev_priv->irq_received);
38427
38428 while (true) {
38429 iir = I915_READ(VLV_IIR);
38430@@ -835,7 +835,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
38431 irqreturn_t ret = IRQ_NONE;
38432 int i;
38433
38434- atomic_inc(&dev_priv->irq_received);
38435+ atomic_inc_unchecked(&dev_priv->irq_received);
38436
38437 /* disable master interrupt before clearing iir */
38438 de_ier = I915_READ(DEIER);
38439@@ -925,7 +925,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
38440 int ret = IRQ_NONE;
38441 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
38442
38443- atomic_inc(&dev_priv->irq_received);
38444+ atomic_inc_unchecked(&dev_priv->irq_received);
38445
38446 /* disable master interrupt before clearing iir */
38447 de_ier = I915_READ(DEIER);
38448@@ -2089,7 +2089,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
38449 {
38450 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38451
38452- atomic_set(&dev_priv->irq_received, 0);
38453+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38454
38455 I915_WRITE(HWSTAM, 0xeffe);
38456
38457@@ -2124,7 +2124,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
38458 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38459 int pipe;
38460
38461- atomic_set(&dev_priv->irq_received, 0);
38462+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38463
38464 /* VLV magic */
38465 I915_WRITE(VLV_IMR, 0);
38466@@ -2411,7 +2411,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
38467 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38468 int pipe;
38469
38470- atomic_set(&dev_priv->irq_received, 0);
38471+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38472
38473 for_each_pipe(pipe)
38474 I915_WRITE(PIPESTAT(pipe), 0);
38475@@ -2490,7 +2490,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
38476 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
38477 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38478
38479- atomic_inc(&dev_priv->irq_received);
38480+ atomic_inc_unchecked(&dev_priv->irq_received);
38481
38482 iir = I915_READ16(IIR);
38483 if (iir == 0)
38484@@ -2565,7 +2565,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
38485 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38486 int pipe;
38487
38488- atomic_set(&dev_priv->irq_received, 0);
38489+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38490
38491 if (I915_HAS_HOTPLUG(dev)) {
38492 I915_WRITE(PORT_HOTPLUG_EN, 0);
38493@@ -2664,7 +2664,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
38494 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38495 int pipe, ret = IRQ_NONE;
38496
38497- atomic_inc(&dev_priv->irq_received);
38498+ atomic_inc_unchecked(&dev_priv->irq_received);
38499
38500 iir = I915_READ(IIR);
38501 do {
38502@@ -2791,7 +2791,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
38503 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38504 int pipe;
38505
38506- atomic_set(&dev_priv->irq_received, 0);
38507+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38508
38509 I915_WRITE(PORT_HOTPLUG_EN, 0);
38510 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
38511@@ -2898,7 +2898,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
38512 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
38513 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38514
38515- atomic_inc(&dev_priv->irq_received);
38516+ atomic_inc_unchecked(&dev_priv->irq_received);
38517
38518 iir = I915_READ(IIR);
38519
38520diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
38521index eea5982..eeef407 100644
38522--- a/drivers/gpu/drm/i915/intel_display.c
38523+++ b/drivers/gpu/drm/i915/intel_display.c
38524@@ -8935,13 +8935,13 @@ struct intel_quirk {
38525 int subsystem_vendor;
38526 int subsystem_device;
38527 void (*hook)(struct drm_device *dev);
38528-};
38529+} __do_const;
38530
38531 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
38532 struct intel_dmi_quirk {
38533 void (*hook)(struct drm_device *dev);
38534 const struct dmi_system_id (*dmi_id_list)[];
38535-};
38536+} __do_const;
38537
38538 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
38539 {
38540@@ -8949,18 +8949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
38541 return 1;
38542 }
38543
38544-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
38545+static const struct dmi_system_id intel_dmi_quirks_table[] = {
38546 {
38547- .dmi_id_list = &(const struct dmi_system_id[]) {
38548- {
38549- .callback = intel_dmi_reverse_brightness,
38550- .ident = "NCR Corporation",
38551- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
38552- DMI_MATCH(DMI_PRODUCT_NAME, ""),
38553- },
38554- },
38555- { } /* terminating entry */
38556+ .callback = intel_dmi_reverse_brightness,
38557+ .ident = "NCR Corporation",
38558+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
38559+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
38560 },
38561+ },
38562+ { } /* terminating entry */
38563+};
38564+
38565+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
38566+ {
38567+ .dmi_id_list = &intel_dmi_quirks_table,
38568 .hook = quirk_invert_brightness,
38569 },
38570 };
38571diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
38572index 54558a0..2d97005 100644
38573--- a/drivers/gpu/drm/mga/mga_drv.h
38574+++ b/drivers/gpu/drm/mga/mga_drv.h
38575@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
38576 u32 clear_cmd;
38577 u32 maccess;
38578
38579- atomic_t vbl_received; /**< Number of vblanks received. */
38580+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
38581 wait_queue_head_t fence_queue;
38582- atomic_t last_fence_retired;
38583+ atomic_unchecked_t last_fence_retired;
38584 u32 next_fence_to_post;
38585
38586 unsigned int fb_cpp;
38587diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
38588index 709e90d..89a1c0d 100644
38589--- a/drivers/gpu/drm/mga/mga_ioc32.c
38590+++ b/drivers/gpu/drm/mga/mga_ioc32.c
38591@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
38592 return 0;
38593 }
38594
38595-drm_ioctl_compat_t *mga_compat_ioctls[] = {
38596+drm_ioctl_compat_t mga_compat_ioctls[] = {
38597 [DRM_MGA_INIT] = compat_mga_init,
38598 [DRM_MGA_GETPARAM] = compat_mga_getparam,
38599 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
38600@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
38601 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38602 {
38603 unsigned int nr = DRM_IOCTL_NR(cmd);
38604- drm_ioctl_compat_t *fn = NULL;
38605 int ret;
38606
38607 if (nr < DRM_COMMAND_BASE)
38608 return drm_compat_ioctl(filp, cmd, arg);
38609
38610- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
38611- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
38612-
38613- if (fn != NULL)
38614+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
38615+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
38616 ret = (*fn) (filp, cmd, arg);
38617- else
38618+ } else
38619 ret = drm_ioctl(filp, cmd, arg);
38620
38621 return ret;
38622diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
38623index 598c281..60d590e 100644
38624--- a/drivers/gpu/drm/mga/mga_irq.c
38625+++ b/drivers/gpu/drm/mga/mga_irq.c
38626@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
38627 if (crtc != 0)
38628 return 0;
38629
38630- return atomic_read(&dev_priv->vbl_received);
38631+ return atomic_read_unchecked(&dev_priv->vbl_received);
38632 }
38633
38634
38635@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
38636 /* VBLANK interrupt */
38637 if (status & MGA_VLINEPEN) {
38638 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
38639- atomic_inc(&dev_priv->vbl_received);
38640+ atomic_inc_unchecked(&dev_priv->vbl_received);
38641 drm_handle_vblank(dev, 0);
38642 handled = 1;
38643 }
38644@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
38645 if ((prim_start & ~0x03) != (prim_end & ~0x03))
38646 MGA_WRITE(MGA_PRIMEND, prim_end);
38647
38648- atomic_inc(&dev_priv->last_fence_retired);
38649+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
38650 DRM_WAKEUP(&dev_priv->fence_queue);
38651 handled = 1;
38652 }
38653@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
38654 * using fences.
38655 */
38656 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
38657- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
38658+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
38659 - *sequence) <= (1 << 23)));
38660
38661 *sequence = cur_fence;
38662diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
38663index 6aa2137..fe8dc55 100644
38664--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
38665+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
38666@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
38667 struct bit_table {
38668 const char id;
38669 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
38670-};
38671+} __no_const;
38672
38673 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
38674
38675diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
38676index f2b30f8..d0f9a95 100644
38677--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
38678+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
38679@@ -92,7 +92,7 @@ struct nouveau_drm {
38680 struct drm_global_reference mem_global_ref;
38681 struct ttm_bo_global_ref bo_global_ref;
38682 struct ttm_bo_device bdev;
38683- atomic_t validate_sequence;
38684+ atomic_unchecked_t validate_sequence;
38685 int (*move)(struct nouveau_channel *,
38686 struct ttm_buffer_object *,
38687 struct ttm_mem_reg *, struct ttm_mem_reg *);
38688diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
38689index b4b4d0c..b7edc15 100644
38690--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
38691+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
38692@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
38693 int ret, i;
38694 struct nouveau_bo *res_bo = NULL;
38695
38696- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
38697+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
38698 retry:
38699 if (++trycnt > 100000) {
38700 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
38701@@ -359,7 +359,7 @@ retry:
38702 if (ret) {
38703 validate_fini(op, NULL);
38704 if (unlikely(ret == -EAGAIN)) {
38705- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
38706+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
38707 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
38708 sequence);
38709 if (!ret)
38710diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38711index 08214bc..9208577 100644
38712--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38713+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38714@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
38715 unsigned long arg)
38716 {
38717 unsigned int nr = DRM_IOCTL_NR(cmd);
38718- drm_ioctl_compat_t *fn = NULL;
38719+ drm_ioctl_compat_t fn = NULL;
38720 int ret;
38721
38722 if (nr < DRM_COMMAND_BASE)
38723diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
38724index 25d3495..d81aaf6 100644
38725--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
38726+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
38727@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
38728 bool can_switch;
38729
38730 spin_lock(&dev->count_lock);
38731- can_switch = (dev->open_count == 0);
38732+ can_switch = (local_read(&dev->open_count) == 0);
38733 spin_unlock(&dev->count_lock);
38734 return can_switch;
38735 }
38736diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
38737index 489cb8c..0b8d0d3 100644
38738--- a/drivers/gpu/drm/qxl/qxl_ttm.c
38739+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
38740@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
38741 }
38742 }
38743
38744-static struct vm_operations_struct qxl_ttm_vm_ops;
38745+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
38746 static const struct vm_operations_struct *ttm_vm_ops;
38747
38748 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38749@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
38750 return r;
38751 if (unlikely(ttm_vm_ops == NULL)) {
38752 ttm_vm_ops = vma->vm_ops;
38753+ pax_open_kernel();
38754 qxl_ttm_vm_ops = *ttm_vm_ops;
38755 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
38756+ pax_close_kernel();
38757 }
38758 vma->vm_ops = &qxl_ttm_vm_ops;
38759 return 0;
38760@@ -556,25 +558,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
38761 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
38762 {
38763 #if defined(CONFIG_DEBUG_FS)
38764- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
38765- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
38766- unsigned i;
38767+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
38768+ {
38769+ .name = "qxl_mem_mm",
38770+ .show = &qxl_mm_dump_table,
38771+ },
38772+ {
38773+ .name = "qxl_surf_mm",
38774+ .show = &qxl_mm_dump_table,
38775+ }
38776+ };
38777
38778- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
38779- if (i == 0)
38780- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
38781- else
38782- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
38783- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
38784- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
38785- qxl_mem_types_list[i].driver_features = 0;
38786- if (i == 0)
38787- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
38788- else
38789- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
38790+ pax_open_kernel();
38791+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
38792+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
38793+ pax_close_kernel();
38794
38795- }
38796- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
38797+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
38798 #else
38799 return 0;
38800 #endif
38801diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
38802index d4660cf..70dbe65 100644
38803--- a/drivers/gpu/drm/r128/r128_cce.c
38804+++ b/drivers/gpu/drm/r128/r128_cce.c
38805@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
38806
38807 /* GH: Simple idle check.
38808 */
38809- atomic_set(&dev_priv->idle_count, 0);
38810+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38811
38812 /* We don't support anything other than bus-mastering ring mode,
38813 * but the ring can be in either AGP or PCI space for the ring
38814diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
38815index 930c71b..499aded 100644
38816--- a/drivers/gpu/drm/r128/r128_drv.h
38817+++ b/drivers/gpu/drm/r128/r128_drv.h
38818@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
38819 int is_pci;
38820 unsigned long cce_buffers_offset;
38821
38822- atomic_t idle_count;
38823+ atomic_unchecked_t idle_count;
38824
38825 int page_flipping;
38826 int current_page;
38827 u32 crtc_offset;
38828 u32 crtc_offset_cntl;
38829
38830- atomic_t vbl_received;
38831+ atomic_unchecked_t vbl_received;
38832
38833 u32 color_fmt;
38834 unsigned int front_offset;
38835diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
38836index a954c54..9cc595c 100644
38837--- a/drivers/gpu/drm/r128/r128_ioc32.c
38838+++ b/drivers/gpu/drm/r128/r128_ioc32.c
38839@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
38840 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
38841 }
38842
38843-drm_ioctl_compat_t *r128_compat_ioctls[] = {
38844+drm_ioctl_compat_t r128_compat_ioctls[] = {
38845 [DRM_R128_INIT] = compat_r128_init,
38846 [DRM_R128_DEPTH] = compat_r128_depth,
38847 [DRM_R128_STIPPLE] = compat_r128_stipple,
38848@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
38849 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38850 {
38851 unsigned int nr = DRM_IOCTL_NR(cmd);
38852- drm_ioctl_compat_t *fn = NULL;
38853 int ret;
38854
38855 if (nr < DRM_COMMAND_BASE)
38856 return drm_compat_ioctl(filp, cmd, arg);
38857
38858- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
38859- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38860-
38861- if (fn != NULL)
38862+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
38863+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38864 ret = (*fn) (filp, cmd, arg);
38865- else
38866+ } else
38867 ret = drm_ioctl(filp, cmd, arg);
38868
38869 return ret;
38870diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
38871index 2ea4f09..d391371 100644
38872--- a/drivers/gpu/drm/r128/r128_irq.c
38873+++ b/drivers/gpu/drm/r128/r128_irq.c
38874@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
38875 if (crtc != 0)
38876 return 0;
38877
38878- return atomic_read(&dev_priv->vbl_received);
38879+ return atomic_read_unchecked(&dev_priv->vbl_received);
38880 }
38881
38882 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38883@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38884 /* VBLANK interrupt */
38885 if (status & R128_CRTC_VBLANK_INT) {
38886 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
38887- atomic_inc(&dev_priv->vbl_received);
38888+ atomic_inc_unchecked(&dev_priv->vbl_received);
38889 drm_handle_vblank(dev, 0);
38890 return IRQ_HANDLED;
38891 }
38892diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
38893index 19bb7e6..de7e2a2 100644
38894--- a/drivers/gpu/drm/r128/r128_state.c
38895+++ b/drivers/gpu/drm/r128/r128_state.c
38896@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
38897
38898 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
38899 {
38900- if (atomic_read(&dev_priv->idle_count) == 0)
38901+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
38902 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
38903 else
38904- atomic_set(&dev_priv->idle_count, 0);
38905+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38906 }
38907
38908 #endif
38909diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
38910index 5a82b6b..9e69c73 100644
38911--- a/drivers/gpu/drm/radeon/mkregtable.c
38912+++ b/drivers/gpu/drm/radeon/mkregtable.c
38913@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
38914 regex_t mask_rex;
38915 regmatch_t match[4];
38916 char buf[1024];
38917- size_t end;
38918+ long end;
38919 int len;
38920 int done = 0;
38921 int r;
38922 unsigned o;
38923 struct offset *offset;
38924 char last_reg_s[10];
38925- int last_reg;
38926+ unsigned long last_reg;
38927
38928 if (regcomp
38929 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
38930diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
38931index b0dc0b6..a9bfe9c 100644
38932--- a/drivers/gpu/drm/radeon/radeon_device.c
38933+++ b/drivers/gpu/drm/radeon/radeon_device.c
38934@@ -1014,7 +1014,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
38935 bool can_switch;
38936
38937 spin_lock(&dev->count_lock);
38938- can_switch = (dev->open_count == 0);
38939+ can_switch = (local_read(&dev->open_count) == 0);
38940 spin_unlock(&dev->count_lock);
38941 return can_switch;
38942 }
38943diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
38944index b369d42..8dd04eb 100644
38945--- a/drivers/gpu/drm/radeon/radeon_drv.h
38946+++ b/drivers/gpu/drm/radeon/radeon_drv.h
38947@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
38948
38949 /* SW interrupt */
38950 wait_queue_head_t swi_queue;
38951- atomic_t swi_emitted;
38952+ atomic_unchecked_t swi_emitted;
38953 int vblank_crtc;
38954 uint32_t irq_enable_reg;
38955 uint32_t r500_disp_irq_reg;
38956diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
38957index c180df8..5fd8186 100644
38958--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
38959+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
38960@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38961 request = compat_alloc_user_space(sizeof(*request));
38962 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
38963 || __put_user(req32.param, &request->param)
38964- || __put_user((void __user *)(unsigned long)req32.value,
38965+ || __put_user((unsigned long)req32.value,
38966 &request->value))
38967 return -EFAULT;
38968
38969@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38970 #define compat_radeon_cp_setparam NULL
38971 #endif /* X86_64 || IA64 */
38972
38973-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38974+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
38975 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
38976 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
38977 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
38978@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38979 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38980 {
38981 unsigned int nr = DRM_IOCTL_NR(cmd);
38982- drm_ioctl_compat_t *fn = NULL;
38983 int ret;
38984
38985 if (nr < DRM_COMMAND_BASE)
38986 return drm_compat_ioctl(filp, cmd, arg);
38987
38988- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
38989- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
38990-
38991- if (fn != NULL)
38992+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
38993+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
38994 ret = (*fn) (filp, cmd, arg);
38995- else
38996+ } else
38997 ret = drm_ioctl(filp, cmd, arg);
38998
38999 return ret;
39000diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
39001index 8d68e97..9dcfed8 100644
39002--- a/drivers/gpu/drm/radeon/radeon_irq.c
39003+++ b/drivers/gpu/drm/radeon/radeon_irq.c
39004@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
39005 unsigned int ret;
39006 RING_LOCALS;
39007
39008- atomic_inc(&dev_priv->swi_emitted);
39009- ret = atomic_read(&dev_priv->swi_emitted);
39010+ atomic_inc_unchecked(&dev_priv->swi_emitted);
39011+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
39012
39013 BEGIN_RING(4);
39014 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
39015@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
39016 drm_radeon_private_t *dev_priv =
39017 (drm_radeon_private_t *) dev->dev_private;
39018
39019- atomic_set(&dev_priv->swi_emitted, 0);
39020+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
39021 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
39022
39023 dev->max_vblank_count = 0x001fffff;
39024diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
39025index 4d20910..6726b6d 100644
39026--- a/drivers/gpu/drm/radeon/radeon_state.c
39027+++ b/drivers/gpu/drm/radeon/radeon_state.c
39028@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
39029 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
39030 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
39031
39032- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39033+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39034 sarea_priv->nbox * sizeof(depth_boxes[0])))
39035 return -EFAULT;
39036
39037@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
39038 {
39039 drm_radeon_private_t *dev_priv = dev->dev_private;
39040 drm_radeon_getparam_t *param = data;
39041- int value;
39042+ int value = 0;
39043
39044 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
39045
39046diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
39047index 6c0ce89..57a2529 100644
39048--- a/drivers/gpu/drm/radeon/radeon_ttm.c
39049+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
39050@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
39051 man->size = size >> PAGE_SHIFT;
39052 }
39053
39054-static struct vm_operations_struct radeon_ttm_vm_ops;
39055+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
39056 static const struct vm_operations_struct *ttm_vm_ops = NULL;
39057
39058 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39059@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
39060 }
39061 if (unlikely(ttm_vm_ops == NULL)) {
39062 ttm_vm_ops = vma->vm_ops;
39063+ pax_open_kernel();
39064 radeon_ttm_vm_ops = *ttm_vm_ops;
39065 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
39066+ pax_close_kernel();
39067 }
39068 vma->vm_ops = &radeon_ttm_vm_ops;
39069 return 0;
39070@@ -853,38 +855,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
39071 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
39072 {
39073 #if defined(CONFIG_DEBUG_FS)
39074- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
39075- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
39076+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
39077+ {
39078+ .name = "radeon_vram_mm",
39079+ .show = &radeon_mm_dump_table,
39080+ },
39081+ {
39082+ .name = "radeon_gtt_mm",
39083+ .show = &radeon_mm_dump_table,
39084+ },
39085+ {
39086+ .name = "ttm_page_pool",
39087+ .show = &ttm_page_alloc_debugfs,
39088+ },
39089+ {
39090+ .name = "ttm_dma_page_pool",
39091+ .show = &ttm_dma_page_alloc_debugfs,
39092+ },
39093+ };
39094 unsigned i;
39095
39096- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
39097- if (i == 0)
39098- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
39099- else
39100- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
39101- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39102- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
39103- radeon_mem_types_list[i].driver_features = 0;
39104- if (i == 0)
39105- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39106- else
39107- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39108-
39109- }
39110- /* Add ttm page pool to debugfs */
39111- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
39112- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39113- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
39114- radeon_mem_types_list[i].driver_features = 0;
39115- radeon_mem_types_list[i++].data = NULL;
39116+ pax_open_kernel();
39117+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39118+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39119+ pax_close_kernel();
39120 #ifdef CONFIG_SWIOTLB
39121- if (swiotlb_nr_tbl()) {
39122- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
39123- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39124- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
39125- radeon_mem_types_list[i].driver_features = 0;
39126- radeon_mem_types_list[i++].data = NULL;
39127- }
39128+ if (swiotlb_nr_tbl())
39129+ i++;
39130 #endif
39131 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
39132
39133diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
39134index 55880d5..9e95342 100644
39135--- a/drivers/gpu/drm/radeon/rs690.c
39136+++ b/drivers/gpu/drm/radeon/rs690.c
39137@@ -327,9 +327,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
39138 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
39139 rdev->pm.sideport_bandwidth.full)
39140 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
39141- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
39142+ read_delay_latency.full = dfixed_const(800 * 1000);
39143 read_delay_latency.full = dfixed_div(read_delay_latency,
39144 rdev->pm.igp_sideport_mclk);
39145+ a.full = dfixed_const(370);
39146+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
39147 } else {
39148 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
39149 rdev->pm.k8_bandwidth.full)
39150diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
39151index dbc2def..0a9f710 100644
39152--- a/drivers/gpu/drm/ttm/ttm_memory.c
39153+++ b/drivers/gpu/drm/ttm/ttm_memory.c
39154@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
39155 zone->glob = glob;
39156 glob->zone_kernel = zone;
39157 ret = kobject_init_and_add(
39158- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39159+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39160 if (unlikely(ret != 0)) {
39161 kobject_put(&zone->kobj);
39162 return ret;
39163@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
39164 zone->glob = glob;
39165 glob->zone_dma32 = zone;
39166 ret = kobject_init_and_add(
39167- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39168+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39169 if (unlikely(ret != 0)) {
39170 kobject_put(&zone->kobj);
39171 return ret;
39172diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39173index bd2a3b4..122d9ad 100644
39174--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
39175+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39176@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
39177 static int ttm_pool_mm_shrink(struct shrinker *shrink,
39178 struct shrink_control *sc)
39179 {
39180- static atomic_t start_pool = ATOMIC_INIT(0);
39181+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
39182 unsigned i;
39183- unsigned pool_offset = atomic_add_return(1, &start_pool);
39184+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
39185 struct ttm_page_pool *pool;
39186 int shrink_pages = sc->nr_to_scan;
39187
39188diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
39189index dc0c065..58a0782 100644
39190--- a/drivers/gpu/drm/udl/udl_fb.c
39191+++ b/drivers/gpu/drm/udl/udl_fb.c
39192@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
39193 fb_deferred_io_cleanup(info);
39194 kfree(info->fbdefio);
39195 info->fbdefio = NULL;
39196- info->fbops->fb_mmap = udl_fb_mmap;
39197 }
39198
39199 pr_warn("released /dev/fb%d user=%d count=%d\n",
39200diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
39201index 893a650..6190d3b 100644
39202--- a/drivers/gpu/drm/via/via_drv.h
39203+++ b/drivers/gpu/drm/via/via_drv.h
39204@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
39205 typedef uint32_t maskarray_t[5];
39206
39207 typedef struct drm_via_irq {
39208- atomic_t irq_received;
39209+ atomic_unchecked_t irq_received;
39210 uint32_t pending_mask;
39211 uint32_t enable_mask;
39212 wait_queue_head_t irq_queue;
39213@@ -75,7 +75,7 @@ typedef struct drm_via_private {
39214 struct timeval last_vblank;
39215 int last_vblank_valid;
39216 unsigned usec_per_vblank;
39217- atomic_t vbl_received;
39218+ atomic_unchecked_t vbl_received;
39219 drm_via_state_t hc_state;
39220 char pci_buf[VIA_PCI_BUF_SIZE];
39221 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
39222diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
39223index ac98964..5dbf512 100644
39224--- a/drivers/gpu/drm/via/via_irq.c
39225+++ b/drivers/gpu/drm/via/via_irq.c
39226@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
39227 if (crtc != 0)
39228 return 0;
39229
39230- return atomic_read(&dev_priv->vbl_received);
39231+ return atomic_read_unchecked(&dev_priv->vbl_received);
39232 }
39233
39234 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39235@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39236
39237 status = VIA_READ(VIA_REG_INTERRUPT);
39238 if (status & VIA_IRQ_VBLANK_PENDING) {
39239- atomic_inc(&dev_priv->vbl_received);
39240- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
39241+ atomic_inc_unchecked(&dev_priv->vbl_received);
39242+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
39243 do_gettimeofday(&cur_vblank);
39244 if (dev_priv->last_vblank_valid) {
39245 dev_priv->usec_per_vblank =
39246@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39247 dev_priv->last_vblank = cur_vblank;
39248 dev_priv->last_vblank_valid = 1;
39249 }
39250- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
39251+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
39252 DRM_DEBUG("US per vblank is: %u\n",
39253 dev_priv->usec_per_vblank);
39254 }
39255@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39256
39257 for (i = 0; i < dev_priv->num_irqs; ++i) {
39258 if (status & cur_irq->pending_mask) {
39259- atomic_inc(&cur_irq->irq_received);
39260+ atomic_inc_unchecked(&cur_irq->irq_received);
39261 DRM_WAKEUP(&cur_irq->irq_queue);
39262 handled = 1;
39263 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
39264@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
39265 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39266 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
39267 masks[irq][4]));
39268- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
39269+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
39270 } else {
39271 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39272 (((cur_irq_sequence =
39273- atomic_read(&cur_irq->irq_received)) -
39274+ atomic_read_unchecked(&cur_irq->irq_received)) -
39275 *sequence) <= (1 << 23)));
39276 }
39277 *sequence = cur_irq_sequence;
39278@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
39279 }
39280
39281 for (i = 0; i < dev_priv->num_irqs; ++i) {
39282- atomic_set(&cur_irq->irq_received, 0);
39283+ atomic_set_unchecked(&cur_irq->irq_received, 0);
39284 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
39285 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
39286 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
39287@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
39288 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
39289 case VIA_IRQ_RELATIVE:
39290 irqwait->request.sequence +=
39291- atomic_read(&cur_irq->irq_received);
39292+ atomic_read_unchecked(&cur_irq->irq_received);
39293 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
39294 case VIA_IRQ_ABSOLUTE:
39295 break;
39296diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39297index 13aeda7..4a952d1 100644
39298--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39299+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39300@@ -290,7 +290,7 @@ struct vmw_private {
39301 * Fencing and IRQs.
39302 */
39303
39304- atomic_t marker_seq;
39305+ atomic_unchecked_t marker_seq;
39306 wait_queue_head_t fence_queue;
39307 wait_queue_head_t fifo_queue;
39308 int fence_queue_waiters; /* Protected by hw_mutex */
39309diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39310index 3eb1486..0a47ee9 100644
39311--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39312+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39313@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
39314 (unsigned int) min,
39315 (unsigned int) fifo->capabilities);
39316
39317- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
39318+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
39319 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
39320 vmw_marker_queue_init(&fifo->marker_queue);
39321 return vmw_fifo_send_fence(dev_priv, &dummy);
39322@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
39323 if (reserveable)
39324 iowrite32(bytes, fifo_mem +
39325 SVGA_FIFO_RESERVED);
39326- return fifo_mem + (next_cmd >> 2);
39327+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
39328 } else {
39329 need_bounce = true;
39330 }
39331@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
39332
39333 fm = vmw_fifo_reserve(dev_priv, bytes);
39334 if (unlikely(fm == NULL)) {
39335- *seqno = atomic_read(&dev_priv->marker_seq);
39336+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
39337 ret = -ENOMEM;
39338 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
39339 false, 3*HZ);
39340@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
39341 }
39342
39343 do {
39344- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
39345+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
39346 } while (*seqno == 0);
39347
39348 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
39349diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39350index c509d40..3b640c3 100644
39351--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39352+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39353@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
39354 int ret;
39355
39356 num_clips = arg->num_clips;
39357- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
39358+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
39359
39360 if (unlikely(num_clips == 0))
39361 return 0;
39362@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
39363 int ret;
39364
39365 num_clips = arg->num_clips;
39366- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
39367+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
39368
39369 if (unlikely(num_clips == 0))
39370 return 0;
39371diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39372index 4640adb..e1384ed 100644
39373--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39374+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39375@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
39376 * emitted. Then the fence is stale and signaled.
39377 */
39378
39379- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
39380+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
39381 > VMW_FENCE_WRAP);
39382
39383 return ret;
39384@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
39385
39386 if (fifo_idle)
39387 down_read(&fifo_state->rwsem);
39388- signal_seq = atomic_read(&dev_priv->marker_seq);
39389+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
39390 ret = 0;
39391
39392 for (;;) {
39393diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39394index 8a8725c2..afed796 100644
39395--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39396+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39397@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
39398 while (!vmw_lag_lt(queue, us)) {
39399 spin_lock(&queue->lock);
39400 if (list_empty(&queue->head))
39401- seqno = atomic_read(&dev_priv->marker_seq);
39402+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
39403 else {
39404 marker = list_first_entry(&queue->head,
39405 struct vmw_marker, head);
39406diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
39407index 8c04943..4370ed9 100644
39408--- a/drivers/gpu/host1x/drm/dc.c
39409+++ b/drivers/gpu/host1x/drm/dc.c
39410@@ -999,7 +999,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
39411 }
39412
39413 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
39414- dc->debugfs_files[i].data = dc;
39415+ *(void **)&dc->debugfs_files[i].data = dc;
39416
39417 err = drm_debugfs_create_files(dc->debugfs_files,
39418 ARRAY_SIZE(debugfs_files),
39419diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
e2b79cd1 39420index 402f486..5340852 100644
bb5f0bf8
AF
39421--- a/drivers/hid/hid-core.c
39422+++ b/drivers/hid/hid-core.c
e2b79cd1
AF
39423@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
39424 struct hid_report_enum *report_enum = device->report_enum + type;
39425 struct hid_report *report;
39426
39427+ if (id >= HID_MAX_IDS)
39428+ return NULL;
39429 if (report_enum->report_id_hash[id])
39430 return report_enum->report_id_hash[id];
39431
39432@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
39433
39434 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
39435 parser->global.report_id = item_udata(item);
39436- if (parser->global.report_id == 0) {
39437- hid_err(parser->device, "report_id 0 is invalid\n");
39438+ if (parser->global.report_id == 0 ||
39439+ parser->global.report_id >= HID_MAX_IDS) {
39440+ hid_err(parser->device, "report_id %u is invalid\n",
39441+ parser->global.report_id);
39442 return -1;
39443 }
39444 return 0;
39445@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
39446 for (i = 0; i < HID_REPORT_TYPES; i++) {
39447 struct hid_report_enum *report_enum = device->report_enum + i;
39448
39449- for (j = 0; j < 256; j++) {
39450+ for (j = 0; j < HID_MAX_IDS; j++) {
39451 struct hid_report *report = report_enum->report_id_hash[j];
39452 if (report)
39453 hid_free_report(report);
39454@@ -755,6 +759,56 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
39455 }
39456 EXPORT_SYMBOL_GPL(hid_parse_report);
39457
39458+static const char * const hid_report_names[] = {
39459+ "HID_INPUT_REPORT",
39460+ "HID_OUTPUT_REPORT",
39461+ "HID_FEATURE_REPORT",
39462+};
39463+/**
39464+ * hid_validate_report - validate existing device report
39465+ *
39466+ * @device: hid device
39467+ * @type: which report type to examine
39468+ * @id: which report ID to examine (0 for first)
39469+ * @fields: expected number of fields
39470+ * @report_counts: expected number of values per field
39471+ *
39472+ * Validate the report details after parsing.
39473+ */
39474+struct hid_report *hid_validate_report(struct hid_device *hid,
39475+ unsigned int type, unsigned int id,
39476+ unsigned int fields,
39477+ unsigned int report_counts)
39478+{
39479+ struct hid_report *report;
39480+ unsigned int i;
39481+
39482+ if (type > HID_FEATURE_REPORT) {
39483+ hid_err(hid, "invalid HID report %u\n", type);
39484+ return NULL;
39485+ }
39486+
39487+ report = hid->report_enum[type].report_id_hash[id];
39488+ if (!report) {
39489+ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
39490+ return NULL;
39491+ }
39492+ if (report->maxfield < fields) {
39493+ hid_err(hid, "not enough fields in %s %u\n",
39494+ hid_report_names[type], id);
39495+ return NULL;
39496+ }
39497+ for (i = 0; i < fields; i++) {
39498+ if (report->field[i]->report_count < report_counts) {
39499+ hid_err(hid, "not enough values in %s %u fields\n",
39500+ hid_report_names[type], id);
39501+ return NULL;
39502+ }
39503+ }
39504+ return report;
39505+}
39506+EXPORT_SYMBOL_GPL(hid_validate_report);
39507+
39508 /**
39509 * hid_open_report - open a driver-specific device report
39510 *
39511@@ -1152,7 +1206,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
39512
39513 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
39514 {
39515- unsigned size = field->report_size;
39516+ unsigned size;
39517+
39518+ if (!field)
39519+ return -1;
39520+
39521+ size = field->report_size;
39522
39523 hid_dump_input(field->report->device, field->usage + offset, value);
39524
39525@@ -2275,7 +2334,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
bb5f0bf8
AF
39526
39527 int hid_add_device(struct hid_device *hdev)
39528 {
39529- static atomic_t id = ATOMIC_INIT(0);
39530+ static atomic_unchecked_t id = ATOMIC_INIT(0);
39531 int ret;
39532
39533 if (WARN_ON(hdev->status & HID_STAT_ADDED))
e2b79cd1 39534@@ -2309,7 +2368,7 @@ int hid_add_device(struct hid_device *hdev)
bb5f0bf8
AF
39535 /* XXX hack, any other cleaner solution after the driver core
39536 * is converted to allow more than 20 bytes as the device name? */
39537 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
39538- hdev->vendor, hdev->product, atomic_inc_return(&id));
39539+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
39540
39541 hid_debug_register(hdev, dev_name(&hdev->dev));
39542 ret = device_add(&hdev->dev);
e2b79cd1
AF
39543diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
39544index 07837f5..b697ada 100644
39545--- a/drivers/hid/hid-lenovo-tpkbd.c
39546+++ b/drivers/hid/hid-lenovo-tpkbd.c
39547@@ -341,6 +341,11 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
39548 char *name_mute, *name_micmute;
39549 int ret;
39550
39551+ /* Validate required reports. */
39552+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, 4, 4, 1) ||
39553+ !hid_validate_report(hdev, HID_OUTPUT_REPORT, 3, 1, 2))
39554+ return -ENODEV;
39555+
39556 if (sysfs_create_group(&hdev->dev.kobj,
39557 &tpkbd_attr_group_pointer)) {
39558 hid_warn(hdev, "Could not create sysfs group\n");
39559diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
39560index b3cd150..9805197 100644
39561--- a/drivers/hid/hid-lg2ff.c
39562+++ b/drivers/hid/hid-lg2ff.c
39563@@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
39564 struct hid_report *report;
39565 struct hid_input *hidinput = list_entry(hid->inputs.next,
39566 struct hid_input, list);
39567- struct list_head *report_list =
39568- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39569 struct input_dev *dev = hidinput->input;
39570 int error;
39571
39572- if (list_empty(report_list)) {
39573- hid_err(hid, "no output report found\n");
39574+ /* Check that the report looks ok */
39575+ report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7);
39576+ if (!report)
39577 return -ENODEV;
39578- }
39579-
39580- report = list_entry(report_list->next, struct hid_report, list);
39581-
39582- if (report->maxfield < 1) {
39583- hid_err(hid, "output report is empty\n");
39584- return -ENODEV;
39585- }
39586- if (report->field[0]->report_count < 7) {
39587- hid_err(hid, "not enough values in the field\n");
39588- return -ENODEV;
39589- }
39590
39591 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
39592 if (!lg2ff)
39593diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
39594index e52f181..53ac79b 100644
39595--- a/drivers/hid/hid-lg3ff.c
39596+++ b/drivers/hid/hid-lg3ff.c
39597@@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
39598 int x, y;
39599
39600 /*
39601- * Maxusage should always be 63 (maximum fields)
39602- * likely a better way to ensure this data is clean
39603+ * Available values in the field should always be 63, but we only use up to
39604+ * 35. Instead, clear the entire area, however big it is.
39605 */
39606- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
39607+ memset(report->field[0]->value, 0,
39608+ sizeof(__s32) * report->field[0]->report_count);
39609
39610 switch (effect->type) {
39611 case FF_CONSTANT:
39612@@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
39613 int lg3ff_init(struct hid_device *hid)
39614 {
39615 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39616- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39617 struct input_dev *dev = hidinput->input;
39618- struct hid_report *report;
39619- struct hid_field *field;
39620 const signed short *ff_bits = ff3_joystick_ac;
39621 int error;
39622 int i;
39623
39624- /* Find the report to use */
39625- if (list_empty(report_list)) {
39626- hid_err(hid, "No output report found\n");
39627- return -1;
39628- }
39629-
39630 /* Check that the report looks ok */
39631- report = list_entry(report_list->next, struct hid_report, list);
39632- if (!report) {
39633- hid_err(hid, "NULL output report\n");
39634- return -1;
39635- }
39636-
39637- field = report->field[0];
39638- if (!field) {
39639- hid_err(hid, "NULL field\n");
39640- return -1;
39641- }
39642+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 35))
39643+ return -ENODEV;
39644
39645 /* Assume single fixed device G940 */
39646 for (i = 0; ff_bits[i] >= 0; i++)
39647diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
39648index 0ddae2a..8b89f0f 100644
39649--- a/drivers/hid/hid-lg4ff.c
39650+++ b/drivers/hid/hid-lg4ff.c
39651@@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
39652 int lg4ff_init(struct hid_device *hid)
39653 {
39654 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39655- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39656 struct input_dev *dev = hidinput->input;
39657- struct hid_report *report;
39658- struct hid_field *field;
39659 struct lg4ff_device_entry *entry;
39660 struct lg_drv_data *drv_data;
39661 struct usb_device_descriptor *udesc;
39662 int error, i, j;
39663 __u16 bcdDevice, rev_maj, rev_min;
39664
39665- /* Find the report to use */
39666- if (list_empty(report_list)) {
39667- hid_err(hid, "No output report found\n");
39668- return -1;
39669- }
39670-
39671 /* Check that the report looks ok */
39672- report = list_entry(report_list->next, struct hid_report, list);
39673- if (!report) {
39674- hid_err(hid, "NULL output report\n");
39675+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7))
39676 return -1;
39677- }
39678-
39679- field = report->field[0];
39680- if (!field) {
39681- hid_err(hid, "NULL field\n");
39682- return -1;
39683- }
39684
39685 /* Check what wheel has been connected */
39686 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
39687diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
39688index d7ea8c8..a84fb40 100644
39689--- a/drivers/hid/hid-lgff.c
39690+++ b/drivers/hid/hid-lgff.c
39691@@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
39692 int lgff_init(struct hid_device* hid)
39693 {
39694 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39695- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39696 struct input_dev *dev = hidinput->input;
39697- struct hid_report *report;
39698- struct hid_field *field;
39699 const signed short *ff_bits = ff_joystick;
39700 int error;
39701 int i;
39702
39703- /* Find the report to use */
39704- if (list_empty(report_list)) {
39705- hid_err(hid, "No output report found\n");
39706- return -1;
39707- }
39708-
39709 /* Check that the report looks ok */
39710- report = list_entry(report_list->next, struct hid_report, list);
39711- field = report->field[0];
39712- if (!field) {
39713- hid_err(hid, "NULL field\n");
39714- return -1;
39715- }
39716+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7))
39717+ return -ENODEV;
39718
39719 for (i = 0; i < ARRAY_SIZE(devices); i++) {
39720 if (dev->id.vendor == devices[i].idVendor &&
39721diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
39722index 5207591a..6c9197f 100644
39723--- a/drivers/hid/hid-logitech-dj.c
39724+++ b/drivers/hid/hid-logitech-dj.c
39725@@ -421,7 +421,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
39726 struct hid_report *report;
39727 struct hid_report_enum *output_report_enum;
39728 u8 *data = (u8 *)(&dj_report->device_index);
39729- int i;
39730+ unsigned int i, length;
39731
39732 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
39733 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
39734@@ -431,7 +431,9 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
39735 return -ENODEV;
39736 }
39737
39738- for (i = 0; i < report->field[0]->report_count; i++)
39739+ length = min_t(size_t, sizeof(*dj_report) - 1,
39740+ report->field[0]->report_count);
39741+ for (i = 0; i < length; i++)
39742 report->field[0]->value[i] = data[i];
39743
39744 hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
39745@@ -738,6 +740,12 @@ static int logi_dj_probe(struct hid_device *hdev,
39746 goto hid_parse_fail;
39747 }
39748
39749+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
39750+ 1, 3)) {
39751+ retval = -ENODEV;
39752+ goto hid_parse_fail;
39753+ }
39754+
39755 /* Starts the usb device and connects to upper interfaces hiddev and
39756 * hidraw */
39757 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
39758diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
39759index d39a5ce..4892dfc 100644
39760--- a/drivers/hid/hid-multitouch.c
39761+++ b/drivers/hid/hid-multitouch.c
39762@@ -330,9 +330,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
39763 break;
39764 }
39765 }
39766+ /* Ignore if value index is out of bounds. */
39767+ if (td->inputmode_index < 0 ||
39768+ td->inputmode_index >= field->report_count) {
39769+ dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
39770+ td->inputmode = -1;
39771+ }
39772
39773 break;
39774 case HID_DG_CONTACTMAX:
39775+ /* Ignore if value count is out of bounds. */
39776+ if (field->report_count < 1)
39777+ break;
39778 td->maxcontact_report_id = field->report->id;
39779 td->maxcontacts = field->value[0];
39780 if (!td->maxcontacts &&
39781@@ -743,15 +752,21 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
39782 unsigned count;
39783 int r, n;
39784
39785+ if (report->maxfield == 0)
39786+ return;
39787+
39788 /*
39789 * Includes multi-packet support where subsequent
39790 * packets are sent with zero contactcount.
39791 */
39792- if (td->cc_index >= 0) {
39793- struct hid_field *field = report->field[td->cc_index];
39794- int value = field->value[td->cc_value_index];
39795- if (value)
39796- td->num_expected = value;
39797+ if (td->cc_index >= 0 && td->cc_index < report->maxfield) {
39798+ field = report->field[td->cc_index];
39799+ if (td->cc_value_index >= 0 &&
39800+ td->cc_value_index < field->report_count) {
39801+ int value = field->value[td->cc_value_index];
39802+ if (value)
39803+ td->num_expected = value;
39804+ }
39805 }
39806
39807 for (r = 0; r < report->maxfield; r++) {
39808diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
39809index ef95102..5482156 100644
39810--- a/drivers/hid/hid-ntrig.c
39811+++ b/drivers/hid/hid-ntrig.c
39812@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
39813 struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
39814 report_id_hash[0x0d];
39815
39816- if (!report)
39817+ if (!report || report->maxfield < 1 ||
39818+ report->field[0]->report_count < 1)
39819 return -EINVAL;
39820
39821 hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
39822diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
39823index b48092d..72bba1e 100644
39824--- a/drivers/hid/hid-picolcd_core.c
39825+++ b/drivers/hid/hid-picolcd_core.c
39826@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
39827 buf += 10;
39828 cnt -= 10;
39829 }
39830- if (!report)
39831+ if (!report || report->maxfield < 1)
39832 return -EINVAL;
39833
39834 while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
39835diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
39836index d29112f..2dcd7d9 100644
39837--- a/drivers/hid/hid-pl.c
39838+++ b/drivers/hid/hid-pl.c
39839@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
39840 strong = &report->field[0]->value[2];
39841 weak = &report->field[0]->value[3];
39842 debug("detected single-field device");
39843- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
39844- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
39845+ } else if (report->field[0]->maxusage == 1 &&
39846+ report->field[0]->usage[0].hid ==
39847+ (HID_UP_LED | 0x43) &&
39848+ report->maxfield >= 4 &&
39849+ report->field[0]->report_count >= 1 &&
39850+ report->field[1]->report_count >= 1 &&
39851+ report->field[2]->report_count >= 1 &&
39852+ report->field[3]->report_count >= 1) {
39853 report->field[0]->value[0] = 0x00;
39854 report->field[1]->value[0] = 0x00;
39855 strong = &report->field[2]->value[0];
39856diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
39857index ca749810..aa34755 100644
39858--- a/drivers/hid/hid-sensor-hub.c
39859+++ b/drivers/hid/hid-sensor-hub.c
39860@@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
39861
39862 mutex_lock(&data->mutex);
39863 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
39864- if (!report || (field_index >= report->maxfield)) {
39865+ if (!report || (field_index >= report->maxfield) ||
39866+ report->field[field_index]->report_count < 1) {
39867 ret = -EINVAL;
39868 goto done_proc;
39869 }
39870diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
39871index d164911..ef42e86 100644
39872--- a/drivers/hid/hid-steelseries.c
39873+++ b/drivers/hid/hid-steelseries.c
39874@@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
39875 goto err_free;
39876 }
39877
39878+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, 0, 1, 16)) {
39879+ ret = -ENODEV;
39880+ goto err_free;
39881+ }
39882+
39883 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
39884 if (ret) {
39885 hid_err(hdev, "hw start failed\n");
bb5f0bf8
AF
39886diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
39887index 90124ff..3761764 100644
39888--- a/drivers/hid/hid-wiimote-debug.c
39889+++ b/drivers/hid/hid-wiimote-debug.c
39890@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
39891 else if (size == 0)
39892 return -EIO;
39893
39894- if (copy_to_user(u, buf, size))
39895+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
39896 return -EFAULT;
39897
39898 *off += size;
e2b79cd1
AF
39899diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
39900index 6ec28a3..b124991 100644
39901--- a/drivers/hid/hid-zpff.c
39902+++ b/drivers/hid/hid-zpff.c
39903@@ -68,22 +68,12 @@ static int zpff_init(struct hid_device *hid)
39904 struct hid_report *report;
39905 struct hid_input *hidinput = list_entry(hid->inputs.next,
39906 struct hid_input, list);
39907- struct list_head *report_list =
39908- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39909 struct input_dev *dev = hidinput->input;
39910 int error;
39911
39912- if (list_empty(report_list)) {
39913- hid_err(hid, "no output report found\n");
39914+ report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 4, 1);
39915+ if (!report)
39916 return -ENODEV;
39917- }
39918-
39919- report = list_entry(report_list->next, struct hid_report, list);
39920-
39921- if (report->maxfield < 4) {
39922- hid_err(hid, "not enough fields in report\n");
39923- return -ENODEV;
39924- }
39925
39926 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
39927 if (!zpff)
39928diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
39929index fc307e0..2b255e8 100644
39930--- a/drivers/hid/uhid.c
39931+++ b/drivers/hid/uhid.c
39932@@ -47,7 +47,7 @@ struct uhid_device {
39933 struct mutex report_lock;
39934 wait_queue_head_t report_wait;
39935 atomic_t report_done;
39936- atomic_t report_id;
39937+ atomic_unchecked_t report_id;
39938 struct uhid_event report_buf;
39939 };
39940
39941@@ -187,7 +187,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
39942
39943 spin_lock_irqsave(&uhid->qlock, flags);
39944 ev->type = UHID_FEATURE;
39945- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
39946+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
39947 ev->u.feature.rnum = rnum;
39948 ev->u.feature.rtype = report_type;
39949
39950@@ -471,7 +471,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
39951 spin_lock_irqsave(&uhid->qlock, flags);
39952
39953 /* id for old report; drop it silently */
39954- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
39955+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
39956 goto unlock;
39957 if (atomic_read(&uhid->report_done))
39958 goto unlock;
bb5f0bf8
AF
39959diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
39960index 0b122f8..b1d8160 100644
39961--- a/drivers/hv/channel.c
39962+++ b/drivers/hv/channel.c
39963@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
39964 int ret = 0;
39965 int t;
39966
39967- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
39968- atomic_inc(&vmbus_connection.next_gpadl_handle);
39969+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
39970+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
39971
39972 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
39973 if (ret)
39974diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
39975index ae49237..380d4c9 100644
39976--- a/drivers/hv/hv.c
39977+++ b/drivers/hv/hv.c
39978@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
39979 u64 output_address = (output) ? virt_to_phys(output) : 0;
39980 u32 output_address_hi = output_address >> 32;
39981 u32 output_address_lo = output_address & 0xFFFFFFFF;
39982- void *hypercall_page = hv_context.hypercall_page;
39983+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
39984
39985 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
39986 "=a"(hv_status_lo) : "d" (control_hi),
39987diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
39988index 12f2f9e..679603c 100644
39989--- a/drivers/hv/hyperv_vmbus.h
39990+++ b/drivers/hv/hyperv_vmbus.h
39991@@ -591,7 +591,7 @@ enum vmbus_connect_state {
39992 struct vmbus_connection {
39993 enum vmbus_connect_state conn_state;
39994
39995- atomic_t next_gpadl_handle;
39996+ atomic_unchecked_t next_gpadl_handle;
39997
39998 /*
39999 * Represents channel interrupts. Each bit position represents a
40000diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
40001index 4004e54..c2de226 100644
40002--- a/drivers/hv/vmbus_drv.c
40003+++ b/drivers/hv/vmbus_drv.c
40004@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
40005 {
40006 int ret = 0;
40007
40008- static atomic_t device_num = ATOMIC_INIT(0);
40009+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
40010
40011 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
40012- atomic_inc_return(&device_num));
40013+ atomic_inc_return_unchecked(&device_num));
40014
40015 child_device_obj->device.bus = &hv_bus;
40016 child_device_obj->device.parent = &hv_acpi_dev->dev;
40017diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
40018index 6351aba..dc4aaf4 100644
40019--- a/drivers/hwmon/acpi_power_meter.c
40020+++ b/drivers/hwmon/acpi_power_meter.c
40021@@ -117,7 +117,7 @@ struct sensor_template {
40022 struct device_attribute *devattr,
40023 const char *buf, size_t count);
40024 int index;
40025-};
40026+} __do_const;
40027
40028 /* Averaging interval */
40029 static int update_avg_interval(struct acpi_power_meter_resource *resource)
40030@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
40031 struct sensor_template *attrs)
40032 {
40033 struct device *dev = &resource->acpi_dev->dev;
40034- struct sensor_device_attribute *sensors =
40035+ sensor_device_attribute_no_const *sensors =
40036 &resource->sensors[resource->num_sensors];
40037 int res = 0;
40038
40039diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
40040index 62c2e32..8f2859a 100644
40041--- a/drivers/hwmon/applesmc.c
40042+++ b/drivers/hwmon/applesmc.c
40043@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
40044 {
40045 struct applesmc_node_group *grp;
40046 struct applesmc_dev_attr *node;
40047- struct attribute *attr;
40048+ attribute_no_const *attr;
40049 int ret, i;
40050
40051 for (grp = groups; grp->format; grp++) {
40052diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
40053index b25c643..a13460d 100644
40054--- a/drivers/hwmon/asus_atk0110.c
40055+++ b/drivers/hwmon/asus_atk0110.c
40056@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
40057 struct atk_sensor_data {
40058 struct list_head list;
40059 struct atk_data *data;
40060- struct device_attribute label_attr;
40061- struct device_attribute input_attr;
40062- struct device_attribute limit1_attr;
40063- struct device_attribute limit2_attr;
40064+ device_attribute_no_const label_attr;
40065+ device_attribute_no_const input_attr;
40066+ device_attribute_no_const limit1_attr;
40067+ device_attribute_no_const limit2_attr;
40068 char label_attr_name[ATTR_NAME_SIZE];
40069 char input_attr_name[ATTR_NAME_SIZE];
40070 char limit1_attr_name[ATTR_NAME_SIZE];
40071@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
40072 static struct device_attribute atk_name_attr =
40073 __ATTR(name, 0444, atk_name_show, NULL);
40074
40075-static void atk_init_attribute(struct device_attribute *attr, char *name,
40076+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
40077 sysfs_show_func show)
40078 {
40079 sysfs_attr_init(&attr->attr);
40080diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
40081index 658ce3a..0d0c2f3 100644
40082--- a/drivers/hwmon/coretemp.c
40083+++ b/drivers/hwmon/coretemp.c
40084@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
40085 return NOTIFY_OK;
40086 }
40087
40088-static struct notifier_block coretemp_cpu_notifier __refdata = {
40089+static struct notifier_block coretemp_cpu_notifier = {
40090 .notifier_call = coretemp_cpu_callback,
40091 };
40092
40093diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
40094index 1429f6e..ee03d59 100644
40095--- a/drivers/hwmon/ibmaem.c
40096+++ b/drivers/hwmon/ibmaem.c
40097@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
40098 struct aem_rw_sensor_template *rw)
40099 {
40100 struct device *dev = &data->pdev->dev;
40101- struct sensor_device_attribute *sensors = data->sensors;
40102+ sensor_device_attribute_no_const *sensors = data->sensors;
40103 int err;
40104
40105 /* Set up read-only sensors */
40106diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
40107index 52b77af..aed1ddf 100644
40108--- a/drivers/hwmon/iio_hwmon.c
40109+++ b/drivers/hwmon/iio_hwmon.c
40110@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
40111 {
40112 struct device *dev = &pdev->dev;
40113 struct iio_hwmon_state *st;
40114- struct sensor_device_attribute *a;
40115+ sensor_device_attribute_no_const *a;
40116 int ret, i;
40117 int in_i = 1, temp_i = 1, curr_i = 1;
40118 enum iio_chan_type type;
40119diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
40120index 9add6092..ee7ba3f 100644
40121--- a/drivers/hwmon/pmbus/pmbus_core.c
40122+++ b/drivers/hwmon/pmbus/pmbus_core.c
40123@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
40124 return 0;
40125 }
40126
40127-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40128+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
40129 const char *name,
40130 umode_t mode,
40131 ssize_t (*show)(struct device *dev,
40132@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40133 dev_attr->store = store;
40134 }
40135
40136-static void pmbus_attr_init(struct sensor_device_attribute *a,
40137+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
40138 const char *name,
40139 umode_t mode,
40140 ssize_t (*show)(struct device *dev,
40141@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
40142 u16 reg, u8 mask)
40143 {
40144 struct pmbus_boolean *boolean;
40145- struct sensor_device_attribute *a;
40146+ sensor_device_attribute_no_const *a;
40147
40148 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
40149 if (!boolean)
40150@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
40151 bool update, bool readonly)
40152 {
40153 struct pmbus_sensor *sensor;
40154- struct device_attribute *a;
40155+ device_attribute_no_const *a;
40156
40157 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
40158 if (!sensor)
40159@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
40160 const char *lstring, int index)
40161 {
40162 struct pmbus_label *label;
40163- struct device_attribute *a;
40164+ device_attribute_no_const *a;
40165
40166 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
40167 if (!label)
40168diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
40169index 2507f90..1645765 100644
40170--- a/drivers/hwmon/sht15.c
40171+++ b/drivers/hwmon/sht15.c
40172@@ -169,7 +169,7 @@ struct sht15_data {
40173 int supply_uv;
40174 bool supply_uv_valid;
40175 struct work_struct update_supply_work;
40176- atomic_t interrupt_handled;
40177+ atomic_unchecked_t interrupt_handled;
40178 };
40179
40180 /**
40181@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
40182 ret = gpio_direction_input(data->pdata->gpio_data);
40183 if (ret)
40184 return ret;
40185- atomic_set(&data->interrupt_handled, 0);
40186+ atomic_set_unchecked(&data->interrupt_handled, 0);
40187
40188 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40189 if (gpio_get_value(data->pdata->gpio_data) == 0) {
40190 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
40191 /* Only relevant if the interrupt hasn't occurred. */
40192- if (!atomic_read(&data->interrupt_handled))
40193+ if (!atomic_read_unchecked(&data->interrupt_handled))
40194 schedule_work(&data->read_work);
40195 }
40196 ret = wait_event_timeout(data->wait_queue,
40197@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
40198
40199 /* First disable the interrupt */
40200 disable_irq_nosync(irq);
40201- atomic_inc(&data->interrupt_handled);
40202+ atomic_inc_unchecked(&data->interrupt_handled);
40203 /* Then schedule a reading work struct */
40204 if (data->state != SHT15_READING_NOTHING)
40205 schedule_work(&data->read_work);
40206@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
40207 * If not, then start the interrupt again - care here as could
40208 * have gone low in meantime so verify it hasn't!
40209 */
40210- atomic_set(&data->interrupt_handled, 0);
40211+ atomic_set_unchecked(&data->interrupt_handled, 0);
40212 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40213 /* If still not occurred or another handler was scheduled */
40214 if (gpio_get_value(data->pdata->gpio_data)
40215- || atomic_read(&data->interrupt_handled))
40216+ || atomic_read_unchecked(&data->interrupt_handled))
40217 return;
40218 }
40219
40220diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
40221index 76f157b..9c0db1b 100644
40222--- a/drivers/hwmon/via-cputemp.c
40223+++ b/drivers/hwmon/via-cputemp.c
40224@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
40225 return NOTIFY_OK;
40226 }
40227
40228-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
40229+static struct notifier_block via_cputemp_cpu_notifier = {
40230 .notifier_call = via_cputemp_cpu_callback,
40231 };
40232
40233diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
40234index 07f01ac..d79ad3d 100644
40235--- a/drivers/i2c/busses/i2c-amd756-s4882.c
40236+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
40237@@ -43,7 +43,7 @@
40238 extern struct i2c_adapter amd756_smbus;
40239
40240 static struct i2c_adapter *s4882_adapter;
40241-static struct i2c_algorithm *s4882_algo;
40242+static i2c_algorithm_no_const *s4882_algo;
40243
40244 /* Wrapper access functions for multiplexed SMBus */
40245 static DEFINE_MUTEX(amd756_lock);
40246diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
40247index 2ca268d..c6acbdf 100644
40248--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
40249+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
40250@@ -41,7 +41,7 @@
40251 extern struct i2c_adapter *nforce2_smbus;
40252
40253 static struct i2c_adapter *s4985_adapter;
40254-static struct i2c_algorithm *s4985_algo;
40255+static i2c_algorithm_no_const *s4985_algo;
40256
40257 /* Wrapper access functions for multiplexed SMBus */
40258 static DEFINE_MUTEX(nforce2_lock);
40259diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
40260index c3ccdea..5b3dc1a 100644
40261--- a/drivers/i2c/i2c-dev.c
40262+++ b/drivers/i2c/i2c-dev.c
40263@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
40264 break;
40265 }
40266
40267- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
40268+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
40269 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
40270 if (IS_ERR(rdwr_pa[i].buf)) {
40271 res = PTR_ERR(rdwr_pa[i].buf);
40272diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
40273index 2ff6204..218c16e 100644
40274--- a/drivers/ide/ide-cd.c
40275+++ b/drivers/ide/ide-cd.c
40276@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
40277 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
40278 if ((unsigned long)buf & alignment
40279 || blk_rq_bytes(rq) & q->dma_pad_mask
40280- || object_is_on_stack(buf))
40281+ || object_starts_on_stack(buf))
40282 drive->dma = 0;
40283 }
40284 }
40285diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
40286index e145931..08bfc59 100644
40287--- a/drivers/iio/industrialio-core.c
40288+++ b/drivers/iio/industrialio-core.c
40289@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
40290 }
40291
40292 static
40293-int __iio_device_attr_init(struct device_attribute *dev_attr,
40294+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
40295 const char *postfix,
40296 struct iio_chan_spec const *chan,
40297 ssize_t (*readfunc)(struct device *dev,
40298diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
40299index 784b97c..c9ceadf 100644
40300--- a/drivers/infiniband/core/cm.c
40301+++ b/drivers/infiniband/core/cm.c
40302@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
40303
40304 struct cm_counter_group {
40305 struct kobject obj;
40306- atomic_long_t counter[CM_ATTR_COUNT];
40307+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
40308 };
40309
40310 struct cm_counter_attribute {
40311@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
40312 struct ib_mad_send_buf *msg = NULL;
40313 int ret;
40314
40315- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40316+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40317 counter[CM_REQ_COUNTER]);
40318
40319 /* Quick state check to discard duplicate REQs. */
40320@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
40321 if (!cm_id_priv)
40322 return;
40323
40324- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40325+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40326 counter[CM_REP_COUNTER]);
40327 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
40328 if (ret)
40329@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
40330 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
40331 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
40332 spin_unlock_irq(&cm_id_priv->lock);
40333- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40334+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40335 counter[CM_RTU_COUNTER]);
40336 goto out;
40337 }
40338@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
40339 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
40340 dreq_msg->local_comm_id);
40341 if (!cm_id_priv) {
40342- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40343+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40344 counter[CM_DREQ_COUNTER]);
40345 cm_issue_drep(work->port, work->mad_recv_wc);
40346 return -EINVAL;
40347@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
40348 case IB_CM_MRA_REP_RCVD:
40349 break;
40350 case IB_CM_TIMEWAIT:
40351- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40352+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40353 counter[CM_DREQ_COUNTER]);
40354 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40355 goto unlock;
40356@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
40357 cm_free_msg(msg);
40358 goto deref;
40359 case IB_CM_DREQ_RCVD:
40360- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40361+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40362 counter[CM_DREQ_COUNTER]);
40363 goto unlock;
40364 default:
40365@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
40366 ib_modify_mad(cm_id_priv->av.port->mad_agent,
40367 cm_id_priv->msg, timeout)) {
40368 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
40369- atomic_long_inc(&work->port->
40370+ atomic_long_inc_unchecked(&work->port->
40371 counter_group[CM_RECV_DUPLICATES].
40372 counter[CM_MRA_COUNTER]);
40373 goto out;
40374@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
40375 break;
40376 case IB_CM_MRA_REQ_RCVD:
40377 case IB_CM_MRA_REP_RCVD:
40378- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40379+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40380 counter[CM_MRA_COUNTER]);
40381 /* fall through */
40382 default:
40383@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
40384 case IB_CM_LAP_IDLE:
40385 break;
40386 case IB_CM_MRA_LAP_SENT:
40387- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40388+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40389 counter[CM_LAP_COUNTER]);
40390 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40391 goto unlock;
40392@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
40393 cm_free_msg(msg);
40394 goto deref;
40395 case IB_CM_LAP_RCVD:
40396- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40397+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40398 counter[CM_LAP_COUNTER]);
40399 goto unlock;
40400 default:
40401@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
40402 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
40403 if (cur_cm_id_priv) {
40404 spin_unlock_irq(&cm.lock);
40405- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40406+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40407 counter[CM_SIDR_REQ_COUNTER]);
40408 goto out; /* Duplicate message. */
40409 }
40410@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
40411 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
40412 msg->retries = 1;
40413
40414- atomic_long_add(1 + msg->retries,
40415+ atomic_long_add_unchecked(1 + msg->retries,
40416 &port->counter_group[CM_XMIT].counter[attr_index]);
40417 if (msg->retries)
40418- atomic_long_add(msg->retries,
40419+ atomic_long_add_unchecked(msg->retries,
40420 &port->counter_group[CM_XMIT_RETRIES].
40421 counter[attr_index]);
40422
40423@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
40424 }
40425
40426 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
40427- atomic_long_inc(&port->counter_group[CM_RECV].
40428+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
40429 counter[attr_id - CM_ATTR_ID_OFFSET]);
40430
40431 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
40432@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
40433 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
40434
40435 return sprintf(buf, "%ld\n",
40436- atomic_long_read(&group->counter[cm_attr->index]));
40437+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
40438 }
40439
40440 static const struct sysfs_ops cm_counter_ops = {
40441diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
40442index 9f5ad7c..588cd84 100644
40443--- a/drivers/infiniband/core/fmr_pool.c
40444+++ b/drivers/infiniband/core/fmr_pool.c
40445@@ -98,8 +98,8 @@ struct ib_fmr_pool {
40446
40447 struct task_struct *thread;
40448
40449- atomic_t req_ser;
40450- atomic_t flush_ser;
40451+ atomic_unchecked_t req_ser;
40452+ atomic_unchecked_t flush_ser;
40453
40454 wait_queue_head_t force_wait;
40455 };
40456@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40457 struct ib_fmr_pool *pool = pool_ptr;
40458
40459 do {
40460- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
40461+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
40462 ib_fmr_batch_release(pool);
40463
40464- atomic_inc(&pool->flush_ser);
40465+ atomic_inc_unchecked(&pool->flush_ser);
40466 wake_up_interruptible(&pool->force_wait);
40467
40468 if (pool->flush_function)
40469@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40470 }
40471
40472 set_current_state(TASK_INTERRUPTIBLE);
40473- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
40474+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
40475 !kthread_should_stop())
40476 schedule();
40477 __set_current_state(TASK_RUNNING);
40478@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
40479 pool->dirty_watermark = params->dirty_watermark;
40480 pool->dirty_len = 0;
40481 spin_lock_init(&pool->pool_lock);
40482- atomic_set(&pool->req_ser, 0);
40483- atomic_set(&pool->flush_ser, 0);
40484+ atomic_set_unchecked(&pool->req_ser, 0);
40485+ atomic_set_unchecked(&pool->flush_ser, 0);
40486 init_waitqueue_head(&pool->force_wait);
40487
40488 pool->thread = kthread_run(ib_fmr_cleanup_thread,
40489@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
40490 }
40491 spin_unlock_irq(&pool->pool_lock);
40492
40493- serial = atomic_inc_return(&pool->req_ser);
40494+ serial = atomic_inc_return_unchecked(&pool->req_ser);
40495 wake_up_process(pool->thread);
40496
40497 if (wait_event_interruptible(pool->force_wait,
40498- atomic_read(&pool->flush_ser) - serial >= 0))
40499+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
40500 return -EINTR;
40501
40502 return 0;
40503@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
40504 } else {
40505 list_add_tail(&fmr->list, &pool->dirty_list);
40506 if (++pool->dirty_len >= pool->dirty_watermark) {
40507- atomic_inc(&pool->req_ser);
40508+ atomic_inc_unchecked(&pool->req_ser);
40509 wake_up_process(pool->thread);
40510 }
40511 }
40512diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
40513index 4cb8eb2..146bf60 100644
40514--- a/drivers/infiniband/hw/cxgb4/mem.c
40515+++ b/drivers/infiniband/hw/cxgb4/mem.c
40516@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40517 int err;
40518 struct fw_ri_tpte tpt;
40519 u32 stag_idx;
40520- static atomic_t key;
40521+ static atomic_unchecked_t key;
40522
40523 if (c4iw_fatal_error(rdev))
40524 return -EIO;
40525@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40526 if (rdev->stats.stag.cur > rdev->stats.stag.max)
40527 rdev->stats.stag.max = rdev->stats.stag.cur;
40528 mutex_unlock(&rdev->stats.lock);
40529- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
40530+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
40531 }
40532 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
40533 __func__, stag_state, type, pdid, stag_idx);
40534diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
40535index 79b3dbc..96e5fcc 100644
40536--- a/drivers/infiniband/hw/ipath/ipath_rc.c
40537+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
40538@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40539 struct ib_atomic_eth *ateth;
40540 struct ipath_ack_entry *e;
40541 u64 vaddr;
40542- atomic64_t *maddr;
40543+ atomic64_unchecked_t *maddr;
40544 u64 sdata;
40545 u32 rkey;
40546 u8 next;
40547@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40548 IB_ACCESS_REMOTE_ATOMIC)))
40549 goto nack_acc_unlck;
40550 /* Perform atomic OP and save result. */
40551- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40552+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40553 sdata = be64_to_cpu(ateth->swap_data);
40554 e = &qp->s_ack_queue[qp->r_head_ack_queue];
40555 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
40556- (u64) atomic64_add_return(sdata, maddr) - sdata :
40557+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40558 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40559 be64_to_cpu(ateth->compare_data),
40560 sdata);
40561diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
40562index 1f95bba..9530f87 100644
40563--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
40564+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
40565@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
40566 unsigned long flags;
40567 struct ib_wc wc;
40568 u64 sdata;
40569- atomic64_t *maddr;
40570+ atomic64_unchecked_t *maddr;
40571 enum ib_wc_status send_status;
40572
40573 /*
40574@@ -382,11 +382,11 @@ again:
40575 IB_ACCESS_REMOTE_ATOMIC)))
40576 goto acc_err;
40577 /* Perform atomic OP and save result. */
40578- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40579+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40580 sdata = wqe->wr.wr.atomic.compare_add;
40581 *(u64 *) sqp->s_sge.sge.vaddr =
40582 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
40583- (u64) atomic64_add_return(sdata, maddr) - sdata :
40584+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40585 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40586 sdata, wqe->wr.wr.atomic.swap);
40587 goto send_comp;
40588diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
40589index 9d3e5c1..d9afe4a 100644
40590--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
40591+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
40592@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
40593 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
40594 }
40595
40596-int mthca_QUERY_FW(struct mthca_dev *dev)
40597+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
40598 {
40599 struct mthca_mailbox *mailbox;
40600 u32 *outbox;
40601diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
40602index ed9a989..e0c5871 100644
40603--- a/drivers/infiniband/hw/mthca/mthca_mr.c
40604+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
40605@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
40606 return key;
40607 }
40608
40609-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
40610+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
40611 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
40612 {
40613 struct mthca_mailbox *mailbox;
40614diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
40615index 4291410..d2ab1fb 100644
40616--- a/drivers/infiniband/hw/nes/nes.c
40617+++ b/drivers/infiniband/hw/nes/nes.c
40618@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
40619 LIST_HEAD(nes_adapter_list);
40620 static LIST_HEAD(nes_dev_list);
40621
40622-atomic_t qps_destroyed;
40623+atomic_unchecked_t qps_destroyed;
40624
40625 static unsigned int ee_flsh_adapter;
40626 static unsigned int sysfs_nonidx_addr;
40627@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
40628 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
40629 struct nes_adapter *nesadapter = nesdev->nesadapter;
40630
40631- atomic_inc(&qps_destroyed);
40632+ atomic_inc_unchecked(&qps_destroyed);
40633
40634 /* Free the control structures */
40635
40636diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
40637index 33cc589..3bd6538 100644
40638--- a/drivers/infiniband/hw/nes/nes.h
40639+++ b/drivers/infiniband/hw/nes/nes.h
40640@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
40641 extern unsigned int wqm_quanta;
40642 extern struct list_head nes_adapter_list;
40643
40644-extern atomic_t cm_connects;
40645-extern atomic_t cm_accepts;
40646-extern atomic_t cm_disconnects;
40647-extern atomic_t cm_closes;
40648-extern atomic_t cm_connecteds;
40649-extern atomic_t cm_connect_reqs;
40650-extern atomic_t cm_rejects;
40651-extern atomic_t mod_qp_timouts;
40652-extern atomic_t qps_created;
40653-extern atomic_t qps_destroyed;
40654-extern atomic_t sw_qps_destroyed;
40655+extern atomic_unchecked_t cm_connects;
40656+extern atomic_unchecked_t cm_accepts;
40657+extern atomic_unchecked_t cm_disconnects;
40658+extern atomic_unchecked_t cm_closes;
40659+extern atomic_unchecked_t cm_connecteds;
40660+extern atomic_unchecked_t cm_connect_reqs;
40661+extern atomic_unchecked_t cm_rejects;
40662+extern atomic_unchecked_t mod_qp_timouts;
40663+extern atomic_unchecked_t qps_created;
40664+extern atomic_unchecked_t qps_destroyed;
40665+extern atomic_unchecked_t sw_qps_destroyed;
40666 extern u32 mh_detected;
40667 extern u32 mh_pauses_sent;
40668 extern u32 cm_packets_sent;
40669@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
40670 extern u32 cm_packets_received;
40671 extern u32 cm_packets_dropped;
40672 extern u32 cm_packets_retrans;
40673-extern atomic_t cm_listens_created;
40674-extern atomic_t cm_listens_destroyed;
40675+extern atomic_unchecked_t cm_listens_created;
40676+extern atomic_unchecked_t cm_listens_destroyed;
40677 extern u32 cm_backlog_drops;
40678-extern atomic_t cm_loopbacks;
40679-extern atomic_t cm_nodes_created;
40680-extern atomic_t cm_nodes_destroyed;
40681-extern atomic_t cm_accel_dropped_pkts;
40682-extern atomic_t cm_resets_recvd;
40683-extern atomic_t pau_qps_created;
40684-extern atomic_t pau_qps_destroyed;
40685+extern atomic_unchecked_t cm_loopbacks;
40686+extern atomic_unchecked_t cm_nodes_created;
40687+extern atomic_unchecked_t cm_nodes_destroyed;
40688+extern atomic_unchecked_t cm_accel_dropped_pkts;
40689+extern atomic_unchecked_t cm_resets_recvd;
40690+extern atomic_unchecked_t pau_qps_created;
40691+extern atomic_unchecked_t pau_qps_destroyed;
40692
40693 extern u32 int_mod_timer_init;
40694 extern u32 int_mod_cq_depth_256;
40695diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
40696index 24b9f1a..00fd004 100644
40697--- a/drivers/infiniband/hw/nes/nes_cm.c
40698+++ b/drivers/infiniband/hw/nes/nes_cm.c
40699@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
40700 u32 cm_packets_retrans;
40701 u32 cm_packets_created;
40702 u32 cm_packets_received;
40703-atomic_t cm_listens_created;
40704-atomic_t cm_listens_destroyed;
40705+atomic_unchecked_t cm_listens_created;
40706+atomic_unchecked_t cm_listens_destroyed;
40707 u32 cm_backlog_drops;
40708-atomic_t cm_loopbacks;
40709-atomic_t cm_nodes_created;
40710-atomic_t cm_nodes_destroyed;
40711-atomic_t cm_accel_dropped_pkts;
40712-atomic_t cm_resets_recvd;
40713+atomic_unchecked_t cm_loopbacks;
40714+atomic_unchecked_t cm_nodes_created;
40715+atomic_unchecked_t cm_nodes_destroyed;
40716+atomic_unchecked_t cm_accel_dropped_pkts;
40717+atomic_unchecked_t cm_resets_recvd;
40718
40719 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
40720 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
40721@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
40722
40723 static struct nes_cm_core *g_cm_core;
40724
40725-atomic_t cm_connects;
40726-atomic_t cm_accepts;
40727-atomic_t cm_disconnects;
40728-atomic_t cm_closes;
40729-atomic_t cm_connecteds;
40730-atomic_t cm_connect_reqs;
40731-atomic_t cm_rejects;
40732+atomic_unchecked_t cm_connects;
40733+atomic_unchecked_t cm_accepts;
40734+atomic_unchecked_t cm_disconnects;
40735+atomic_unchecked_t cm_closes;
40736+atomic_unchecked_t cm_connecteds;
40737+atomic_unchecked_t cm_connect_reqs;
40738+atomic_unchecked_t cm_rejects;
40739
40740 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
40741 {
40742@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
40743 kfree(listener);
40744 listener = NULL;
40745 ret = 0;
40746- atomic_inc(&cm_listens_destroyed);
40747+ atomic_inc_unchecked(&cm_listens_destroyed);
40748 } else {
40749 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
40750 }
40751@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
40752 cm_node->rem_mac);
40753
40754 add_hte_node(cm_core, cm_node);
40755- atomic_inc(&cm_nodes_created);
40756+ atomic_inc_unchecked(&cm_nodes_created);
40757
40758 return cm_node;
40759 }
40760@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
40761 }
40762
40763 atomic_dec(&cm_core->node_cnt);
40764- atomic_inc(&cm_nodes_destroyed);
40765+ atomic_inc_unchecked(&cm_nodes_destroyed);
40766 nesqp = cm_node->nesqp;
40767 if (nesqp) {
40768 nesqp->cm_node = NULL;
40769@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
40770
40771 static void drop_packet(struct sk_buff *skb)
40772 {
40773- atomic_inc(&cm_accel_dropped_pkts);
40774+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
40775 dev_kfree_skb_any(skb);
40776 }
40777
40778@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
40779 {
40780
40781 int reset = 0; /* whether to send reset in case of err.. */
40782- atomic_inc(&cm_resets_recvd);
40783+ atomic_inc_unchecked(&cm_resets_recvd);
40784 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
40785 " refcnt=%d\n", cm_node, cm_node->state,
40786 atomic_read(&cm_node->ref_count));
40787@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
40788 rem_ref_cm_node(cm_node->cm_core, cm_node);
40789 return NULL;
40790 }
40791- atomic_inc(&cm_loopbacks);
40792+ atomic_inc_unchecked(&cm_loopbacks);
40793 loopbackremotenode->loopbackpartner = cm_node;
40794 loopbackremotenode->tcp_cntxt.rcv_wscale =
40795 NES_CM_DEFAULT_RCV_WND_SCALE;
40796@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
40797 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
40798 else {
40799 rem_ref_cm_node(cm_core, cm_node);
40800- atomic_inc(&cm_accel_dropped_pkts);
40801+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
40802 dev_kfree_skb_any(skb);
40803 }
40804 break;
40805@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
40806
40807 if ((cm_id) && (cm_id->event_handler)) {
40808 if (issue_disconn) {
40809- atomic_inc(&cm_disconnects);
40810+ atomic_inc_unchecked(&cm_disconnects);
40811 cm_event.event = IW_CM_EVENT_DISCONNECT;
40812 cm_event.status = disconn_status;
40813 cm_event.local_addr = cm_id->local_addr;
40814@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
40815 }
40816
40817 if (issue_close) {
40818- atomic_inc(&cm_closes);
40819+ atomic_inc_unchecked(&cm_closes);
40820 nes_disconnect(nesqp, 1);
40821
40822 cm_id->provider_data = nesqp;
40823@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
40824
40825 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
40826 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
40827- atomic_inc(&cm_accepts);
40828+ atomic_inc_unchecked(&cm_accepts);
40829
40830 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
40831 netdev_refcnt_read(nesvnic->netdev));
40832@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
40833 struct nes_cm_core *cm_core;
40834 u8 *start_buff;
40835
40836- atomic_inc(&cm_rejects);
40837+ atomic_inc_unchecked(&cm_rejects);
40838 cm_node = (struct nes_cm_node *)cm_id->provider_data;
40839 loopback = cm_node->loopbackpartner;
40840 cm_core = cm_node->cm_core;
40841@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
40842 ntohl(cm_id->local_addr.sin_addr.s_addr),
40843 ntohs(cm_id->local_addr.sin_port));
40844
40845- atomic_inc(&cm_connects);
40846+ atomic_inc_unchecked(&cm_connects);
40847 nesqp->active_conn = 1;
40848
40849 /* cache the cm_id in the qp */
40850@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
40851 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
40852 return err;
40853 }
40854- atomic_inc(&cm_listens_created);
40855+ atomic_inc_unchecked(&cm_listens_created);
40856 }
40857
40858 cm_id->add_ref(cm_id);
40859@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
40860
40861 if (nesqp->destroyed)
40862 return;
40863- atomic_inc(&cm_connecteds);
40864+ atomic_inc_unchecked(&cm_connecteds);
40865 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
40866 " local port 0x%04X. jiffies = %lu.\n",
40867 nesqp->hwqp.qp_id,
40868@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
40869
40870 cm_id->add_ref(cm_id);
40871 ret = cm_id->event_handler(cm_id, &cm_event);
40872- atomic_inc(&cm_closes);
40873+ atomic_inc_unchecked(&cm_closes);
40874 cm_event.event = IW_CM_EVENT_CLOSE;
40875 cm_event.status = 0;
40876 cm_event.provider_data = cm_id->provider_data;
40877@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
40878 return;
40879 cm_id = cm_node->cm_id;
40880
40881- atomic_inc(&cm_connect_reqs);
40882+ atomic_inc_unchecked(&cm_connect_reqs);
40883 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
40884 cm_node, cm_id, jiffies);
40885
40886@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
40887 return;
40888 cm_id = cm_node->cm_id;
40889
40890- atomic_inc(&cm_connect_reqs);
40891+ atomic_inc_unchecked(&cm_connect_reqs);
40892 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
40893 cm_node, cm_id, jiffies);
40894
40895diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
40896index 4166452..fc952c3 100644
40897--- a/drivers/infiniband/hw/nes/nes_mgt.c
40898+++ b/drivers/infiniband/hw/nes/nes_mgt.c
40899@@ -40,8 +40,8 @@
40900 #include "nes.h"
40901 #include "nes_mgt.h"
40902
40903-atomic_t pau_qps_created;
40904-atomic_t pau_qps_destroyed;
40905+atomic_unchecked_t pau_qps_created;
40906+atomic_unchecked_t pau_qps_destroyed;
40907
40908 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
40909 {
40910@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
40911 {
40912 struct sk_buff *skb;
40913 unsigned long flags;
40914- atomic_inc(&pau_qps_destroyed);
40915+ atomic_inc_unchecked(&pau_qps_destroyed);
40916
40917 /* Free packets that have not yet been forwarded */
40918 /* Lock is acquired by skb_dequeue when removing the skb */
40919@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
40920 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
40921 skb_queue_head_init(&nesqp->pau_list);
40922 spin_lock_init(&nesqp->pau_lock);
40923- atomic_inc(&pau_qps_created);
40924+ atomic_inc_unchecked(&pau_qps_created);
40925 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
40926 }
40927
40928diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
40929index 49eb511..a774366 100644
40930--- a/drivers/infiniband/hw/nes/nes_nic.c
40931+++ b/drivers/infiniband/hw/nes/nes_nic.c
40932@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
40933 target_stat_values[++index] = mh_detected;
40934 target_stat_values[++index] = mh_pauses_sent;
40935 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
40936- target_stat_values[++index] = atomic_read(&cm_connects);
40937- target_stat_values[++index] = atomic_read(&cm_accepts);
40938- target_stat_values[++index] = atomic_read(&cm_disconnects);
40939- target_stat_values[++index] = atomic_read(&cm_connecteds);
40940- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
40941- target_stat_values[++index] = atomic_read(&cm_rejects);
40942- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
40943- target_stat_values[++index] = atomic_read(&qps_created);
40944- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
40945- target_stat_values[++index] = atomic_read(&qps_destroyed);
40946- target_stat_values[++index] = atomic_read(&cm_closes);
40947+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
40948+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
40949+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
40950+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
40951+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
40952+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
40953+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
40954+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
40955+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
40956+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
40957+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
40958 target_stat_values[++index] = cm_packets_sent;
40959 target_stat_values[++index] = cm_packets_bounced;
40960 target_stat_values[++index] = cm_packets_created;
40961 target_stat_values[++index] = cm_packets_received;
40962 target_stat_values[++index] = cm_packets_dropped;
40963 target_stat_values[++index] = cm_packets_retrans;
40964- target_stat_values[++index] = atomic_read(&cm_listens_created);
40965- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
40966+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
40967+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
40968 target_stat_values[++index] = cm_backlog_drops;
40969- target_stat_values[++index] = atomic_read(&cm_loopbacks);
40970- target_stat_values[++index] = atomic_read(&cm_nodes_created);
40971- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
40972- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
40973- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
40974+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
40975+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
40976+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
40977+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
40978+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
40979 target_stat_values[++index] = nesadapter->free_4kpbl;
40980 target_stat_values[++index] = nesadapter->free_256pbl;
40981 target_stat_values[++index] = int_mod_timer_init;
40982 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
40983 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
40984 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
40985- target_stat_values[++index] = atomic_read(&pau_qps_created);
40986- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
40987+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
40988+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
40989 }
40990
40991 /**
40992diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
40993index 8f67fe2..8960859 100644
40994--- a/drivers/infiniband/hw/nes/nes_verbs.c
40995+++ b/drivers/infiniband/hw/nes/nes_verbs.c
40996@@ -46,9 +46,9 @@
40997
40998 #include <rdma/ib_umem.h>
40999
41000-atomic_t mod_qp_timouts;
41001-atomic_t qps_created;
41002-atomic_t sw_qps_destroyed;
41003+atomic_unchecked_t mod_qp_timouts;
41004+atomic_unchecked_t qps_created;
41005+atomic_unchecked_t sw_qps_destroyed;
41006
41007 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
41008
41009@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
41010 if (init_attr->create_flags)
41011 return ERR_PTR(-EINVAL);
41012
41013- atomic_inc(&qps_created);
41014+ atomic_inc_unchecked(&qps_created);
41015 switch (init_attr->qp_type) {
41016 case IB_QPT_RC:
41017 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
41018@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
41019 struct iw_cm_event cm_event;
41020 int ret = 0;
41021
41022- atomic_inc(&sw_qps_destroyed);
41023+ atomic_inc_unchecked(&sw_qps_destroyed);
41024 nesqp->destroyed = 1;
41025
41026 /* Blow away the connection if it exists. */
41027diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
41028index 4d11575..3e890e5 100644
41029--- a/drivers/infiniband/hw/qib/qib.h
41030+++ b/drivers/infiniband/hw/qib/qib.h
41031@@ -51,6 +51,7 @@
41032 #include <linux/completion.h>
41033 #include <linux/kref.h>
41034 #include <linux/sched.h>
41035+#include <linux/slab.h>
41036
41037 #include "qib_common.h"
41038 #include "qib_verbs.h"
41039diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
41040index da739d9..da1c7f4 100644
41041--- a/drivers/input/gameport/gameport.c
41042+++ b/drivers/input/gameport/gameport.c
41043@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
41044 */
41045 static void gameport_init_port(struct gameport *gameport)
41046 {
41047- static atomic_t gameport_no = ATOMIC_INIT(0);
41048+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
41049
41050 __module_get(THIS_MODULE);
41051
41052 mutex_init(&gameport->drv_mutex);
41053 device_initialize(&gameport->dev);
41054 dev_set_name(&gameport->dev, "gameport%lu",
41055- (unsigned long)atomic_inc_return(&gameport_no) - 1);
41056+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
41057 gameport->dev.bus = &gameport_bus;
41058 gameport->dev.release = gameport_release_port;
41059 if (gameport->parent)
41060diff --git a/drivers/input/input.c b/drivers/input/input.c
41061index c044699..174d71a 100644
41062--- a/drivers/input/input.c
41063+++ b/drivers/input/input.c
41064@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
41065 */
41066 int input_register_device(struct input_dev *dev)
41067 {
41068- static atomic_t input_no = ATOMIC_INIT(0);
41069+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
41070 struct input_devres *devres = NULL;
41071 struct input_handler *handler;
41072 unsigned int packet_size;
41073@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
41074 dev->setkeycode = input_default_setkeycode;
41075
41076 dev_set_name(&dev->dev, "input%ld",
41077- (unsigned long) atomic_inc_return(&input_no) - 1);
41078+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
41079
41080 error = device_add(&dev->dev);
41081 if (error)
41082diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
41083index 04c69af..5f92d00 100644
41084--- a/drivers/input/joystick/sidewinder.c
41085+++ b/drivers/input/joystick/sidewinder.c
41086@@ -30,6 +30,7 @@
41087 #include <linux/kernel.h>
41088 #include <linux/module.h>
41089 #include <linux/slab.h>
41090+#include <linux/sched.h>
41091 #include <linux/init.h>
41092 #include <linux/input.h>
41093 #include <linux/gameport.h>
41094diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
41095index fa061d4..4a6957c 100644
41096--- a/drivers/input/joystick/xpad.c
41097+++ b/drivers/input/joystick/xpad.c
41098@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
41099
41100 static int xpad_led_probe(struct usb_xpad *xpad)
41101 {
41102- static atomic_t led_seq = ATOMIC_INIT(0);
41103+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
41104 long led_no;
41105 struct xpad_led *led;
41106 struct led_classdev *led_cdev;
41107@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
41108 if (!led)
41109 return -ENOMEM;
41110
41111- led_no = (long)atomic_inc_return(&led_seq) - 1;
41112+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
41113
41114 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
41115 led->xpad = xpad;
41116diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
41117index 2f0b39d..7370f13 100644
41118--- a/drivers/input/mouse/psmouse.h
41119+++ b/drivers/input/mouse/psmouse.h
41120@@ -116,7 +116,7 @@ struct psmouse_attribute {
41121 ssize_t (*set)(struct psmouse *psmouse, void *data,
41122 const char *buf, size_t count);
41123 bool protect;
41124-};
41125+} __do_const;
41126 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
41127
41128 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
41129diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
41130index 4c842c3..590b0bf 100644
41131--- a/drivers/input/mousedev.c
41132+++ b/drivers/input/mousedev.c
41133@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
41134
41135 spin_unlock_irq(&client->packet_lock);
41136
41137- if (copy_to_user(buffer, data, count))
41138+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
41139 return -EFAULT;
41140
41141 return count;
41142diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
41143index 25fc597..558bf3b3 100644
41144--- a/drivers/input/serio/serio.c
41145+++ b/drivers/input/serio/serio.c
41146@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
41147 */
41148 static void serio_init_port(struct serio *serio)
41149 {
41150- static atomic_t serio_no = ATOMIC_INIT(0);
41151+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
41152
41153 __module_get(THIS_MODULE);
41154
41155@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
41156 mutex_init(&serio->drv_mutex);
41157 device_initialize(&serio->dev);
41158 dev_set_name(&serio->dev, "serio%ld",
41159- (long)atomic_inc_return(&serio_no) - 1);
41160+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
41161 serio->dev.bus = &serio_bus;
41162 serio->dev.release = serio_release_port;
41163 serio->dev.groups = serio_device_attr_groups;
41164diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
41165index d8f98b1..f62a640 100644
41166--- a/drivers/iommu/iommu.c
41167+++ b/drivers/iommu/iommu.c
41168@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
41169 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
41170 {
41171 bus_register_notifier(bus, &iommu_bus_nb);
41172- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
41173+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
41174 }
41175
41176 /**
41177diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
41178index dcfea4e..f4226b2 100644
41179--- a/drivers/iommu/irq_remapping.c
41180+++ b/drivers/iommu/irq_remapping.c
41181@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
41182 void panic_if_irq_remap(const char *msg)
41183 {
41184 if (irq_remapping_enabled)
41185- panic(msg);
41186+ panic("%s", msg);
41187 }
41188
41189 static void ir_ack_apic_edge(struct irq_data *data)
41190@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
41191
41192 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
41193 {
41194- chip->irq_print_chip = ir_print_prefix;
41195- chip->irq_ack = ir_ack_apic_edge;
41196- chip->irq_eoi = ir_ack_apic_level;
41197- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41198+ pax_open_kernel();
41199+ *(void **)&chip->irq_print_chip = ir_print_prefix;
41200+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
41201+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
41202+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41203+ pax_close_kernel();
41204 }
41205
41206 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
41207diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
41208index 19ceaa6..3625818 100644
41209--- a/drivers/irqchip/irq-gic.c
41210+++ b/drivers/irqchip/irq-gic.c
41211@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
41212 * Supported arch specific GIC irq extension.
41213 * Default make them NULL.
41214 */
41215-struct irq_chip gic_arch_extn = {
41216+irq_chip_no_const gic_arch_extn = {
41217 .irq_eoi = NULL,
41218 .irq_mask = NULL,
41219 .irq_unmask = NULL,
41220@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
41221 chained_irq_exit(chip, desc);
41222 }
41223
41224-static struct irq_chip gic_chip = {
41225+static irq_chip_no_const gic_chip __read_only = {
41226 .name = "GIC",
41227 .irq_mask = gic_mask_irq,
41228 .irq_unmask = gic_unmask_irq,
41229diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
41230index ac6f72b..81150f2 100644
41231--- a/drivers/isdn/capi/capi.c
41232+++ b/drivers/isdn/capi/capi.c
41233@@ -81,8 +81,8 @@ struct capiminor {
41234
41235 struct capi20_appl *ap;
41236 u32 ncci;
41237- atomic_t datahandle;
41238- atomic_t msgid;
41239+ atomic_unchecked_t datahandle;
41240+ atomic_unchecked_t msgid;
41241
41242 struct tty_port port;
41243 int ttyinstop;
41244@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
41245 capimsg_setu16(s, 2, mp->ap->applid);
41246 capimsg_setu8 (s, 4, CAPI_DATA_B3);
41247 capimsg_setu8 (s, 5, CAPI_RESP);
41248- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
41249+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
41250 capimsg_setu32(s, 8, mp->ncci);
41251 capimsg_setu16(s, 12, datahandle);
41252 }
41253@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
41254 mp->outbytes -= len;
41255 spin_unlock_bh(&mp->outlock);
41256
41257- datahandle = atomic_inc_return(&mp->datahandle);
41258+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
41259 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
41260 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41261 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41262 capimsg_setu16(skb->data, 2, mp->ap->applid);
41263 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
41264 capimsg_setu8 (skb->data, 5, CAPI_REQ);
41265- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
41266+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
41267 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
41268 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
41269 capimsg_setu16(skb->data, 16, len); /* Data length */
41270diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
41271index 600c79b..3752bab 100644
41272--- a/drivers/isdn/gigaset/interface.c
41273+++ b/drivers/isdn/gigaset/interface.c
41274@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
41275 }
41276 tty->driver_data = cs;
41277
41278- ++cs->port.count;
41279+ atomic_inc(&cs->port.count);
41280
41281- if (cs->port.count == 1) {
41282+ if (atomic_read(&cs->port.count) == 1) {
41283 tty_port_tty_set(&cs->port, tty);
41284 cs->port.low_latency = 1;
41285 }
41286@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
41287
41288 if (!cs->connected)
41289 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
41290- else if (!cs->port.count)
41291+ else if (!atomic_read(&cs->port.count))
41292 dev_warn(cs->dev, "%s: device not opened\n", __func__);
41293- else if (!--cs->port.count)
41294+ else if (!atomic_dec_return(&cs->port.count))
41295 tty_port_tty_set(&cs->port, NULL);
41296
41297 mutex_unlock(&cs->mutex);
e2b79cd1
AF
41298diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
41299index d0a41cb..f0cdb8c 100644
41300--- a/drivers/isdn/gigaset/usb-gigaset.c
41301+++ b/drivers/isdn/gigaset/usb-gigaset.c
41302@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
41303 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
41304 memcpy(cs->hw.usb->bchars, buf, 6);
41305 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
41306- 0, 0, &buf, 6, 2000);
41307+ 0, 0, buf, 6, 2000);
41308 }
41309
41310 static void gigaset_freebcshw(struct bc_state *bcs)
bb5f0bf8
AF
41311diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
41312index 4d9b195..455075c 100644
41313--- a/drivers/isdn/hardware/avm/b1.c
41314+++ b/drivers/isdn/hardware/avm/b1.c
41315@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
41316 }
41317 if (left) {
41318 if (t4file->user) {
41319- if (copy_from_user(buf, dp, left))
41320+ if (left > sizeof buf || copy_from_user(buf, dp, left))
41321 return -EFAULT;
41322 } else {
41323 memcpy(buf, dp, left);
41324@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
41325 }
41326 if (left) {
41327 if (config->user) {
41328- if (copy_from_user(buf, dp, left))
41329+ if (left > sizeof buf || copy_from_user(buf, dp, left))
41330 return -EFAULT;
41331 } else {
41332 memcpy(buf, dp, left);
e2b79cd1
AF
41333diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
41334index 9bb12ba..d4262f7 100644
41335--- a/drivers/isdn/i4l/isdn_common.c
41336+++ b/drivers/isdn/i4l/isdn_common.c
41337@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
41338 } else
41339 return -EINVAL;
41340 case IIOCDBGVAR:
41341+ if (!capable(CAP_SYS_RAWIO))
41342+ return -EPERM;
41343 if (arg) {
41344 if (copy_to_user(argp, &dev, sizeof(ulong)))
41345 return -EFAULT;
bb5f0bf8
AF
41346diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
41347index 3c5f249..5fac4d0 100644
41348--- a/drivers/isdn/i4l/isdn_tty.c
41349+++ b/drivers/isdn/i4l/isdn_tty.c
41350@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
41351
41352 #ifdef ISDN_DEBUG_MODEM_OPEN
41353 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
41354- port->count);
41355+ atomic_read(&port->count));
41356 #endif
41357- port->count++;
41358+ atomic_inc(&port->count);
41359 port->tty = tty;
41360 /*
41361 * Start up serial port
41362@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41363 #endif
41364 return;
41365 }
41366- if ((tty->count == 1) && (port->count != 1)) {
41367+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
41368 /*
41369 * Uh, oh. tty->count is 1, which means that the tty
41370 * structure will be freed. Info->count should always
41371@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41372 * serial port won't be shutdown.
41373 */
41374 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
41375- "info->count is %d\n", port->count);
41376- port->count = 1;
41377+ "info->count is %d\n", atomic_read(&port->count));
41378+ atomic_set(&port->count, 1);
41379 }
41380- if (--port->count < 0) {
41381+ if (atomic_dec_return(&port->count) < 0) {
41382 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
41383- info->line, port->count);
41384- port->count = 0;
41385+ info->line, atomic_read(&port->count));
41386+ atomic_set(&port->count, 0);
41387 }
41388- if (port->count) {
41389+ if (atomic_read(&port->count)) {
41390 #ifdef ISDN_DEBUG_MODEM_OPEN
41391 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
41392 #endif
41393@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
41394 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
41395 return;
41396 isdn_tty_shutdown(info);
41397- port->count = 0;
41398+ atomic_set(&port->count, 0);
41399 port->flags &= ~ASYNC_NORMAL_ACTIVE;
41400 port->tty = NULL;
41401 wake_up_interruptible(&port->open_wait);
41402@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
41403 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
41404 modem_info *info = &dev->mdm.info[i];
41405
41406- if (info->port.count == 0)
41407+ if (atomic_read(&info->port.count) == 0)
41408 continue;
41409 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
41410 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
41411diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
41412index e74df7c..03a03ba 100644
41413--- a/drivers/isdn/icn/icn.c
41414+++ b/drivers/isdn/icn/icn.c
41415@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
41416 if (count > len)
41417 count = len;
41418 if (user) {
41419- if (copy_from_user(msg, buf, count))
41420+ if (count > sizeof msg || copy_from_user(msg, buf, count))
41421 return -EFAULT;
41422 } else
41423 memcpy(msg, buf, count);
41424diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
41425index 6a8405d..0bd1c7e 100644
41426--- a/drivers/leds/leds-clevo-mail.c
41427+++ b/drivers/leds/leds-clevo-mail.c
41428@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
41429 * detected as working, but in reality it is not) as low as
41430 * possible.
41431 */
41432-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
41433+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
41434 {
41435 .callback = clevo_mail_led_dmi_callback,
41436 .ident = "Clevo D410J",
41437diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
41438index 64e204e..c6bf189 100644
41439--- a/drivers/leds/leds-ss4200.c
41440+++ b/drivers/leds/leds-ss4200.c
41441@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
41442 * detected as working, but in reality it is not) as low as
41443 * possible.
41444 */
41445-static struct dmi_system_id __initdata nas_led_whitelist[] = {
41446+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
41447 {
41448 .callback = ss4200_led_dmi_callback,
41449 .ident = "Intel SS4200-E",
41450diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
41451index 0bf1e4e..b4bf44e 100644
41452--- a/drivers/lguest/core.c
41453+++ b/drivers/lguest/core.c
41454@@ -97,9 +97,17 @@ static __init int map_switcher(void)
41455 * The end address needs +1 because __get_vm_area allocates an
41456 * extra guard page, so we need space for that.
41457 */
41458+
41459+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
41460+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41461+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
41462+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41463+#else
41464 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41465 VM_ALLOC, switcher_addr, switcher_addr
41466 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41467+#endif
41468+
41469 if (!switcher_vma) {
41470 err = -ENOMEM;
41471 printk("lguest: could not map switcher pages high\n");
41472@@ -124,7 +132,7 @@ static __init int map_switcher(void)
41473 * Now the Switcher is mapped at the right address, we can't fail!
41474 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
41475 */
41476- memcpy(switcher_vma->addr, start_switcher_text,
41477+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
41478 end_switcher_text - start_switcher_text);
41479
41480 printk(KERN_INFO "lguest: mapped switcher at %p\n",
41481diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
41482index 5b9ac32..2ef4f26 100644
41483--- a/drivers/lguest/page_tables.c
41484+++ b/drivers/lguest/page_tables.c
41485@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
41486 /*:*/
41487
41488 #ifdef CONFIG_X86_PAE
41489-static void release_pmd(pmd_t *spmd)
41490+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
41491 {
41492 /* If the entry's not present, there's nothing to release. */
41493 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
41494diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
41495index f0a3347..f6608b2 100644
41496--- a/drivers/lguest/x86/core.c
41497+++ b/drivers/lguest/x86/core.c
41498@@ -59,7 +59,7 @@ static struct {
41499 /* Offset from where switcher.S was compiled to where we've copied it */
41500 static unsigned long switcher_offset(void)
41501 {
41502- return switcher_addr - (unsigned long)start_switcher_text;
41503+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
41504 }
41505
41506 /* This cpu's struct lguest_pages (after the Switcher text page) */
41507@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
41508 * These copies are pretty cheap, so we do them unconditionally: */
41509 /* Save the current Host top-level page directory.
41510 */
41511+
41512+#ifdef CONFIG_PAX_PER_CPU_PGD
41513+ pages->state.host_cr3 = read_cr3();
41514+#else
41515 pages->state.host_cr3 = __pa(current->mm->pgd);
41516+#endif
41517+
41518 /*
41519 * Set up the Guest's page tables to see this CPU's pages (and no
41520 * other CPU's pages).
41521@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
41522 * compiled-in switcher code and the high-mapped copy we just made.
41523 */
41524 for (i = 0; i < IDT_ENTRIES; i++)
41525- default_idt_entries[i] += switcher_offset();
41526+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
41527
41528 /*
41529 * Set up the Switcher's per-cpu areas.
41530@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
41531 * it will be undisturbed when we switch. To change %cs and jump we
41532 * need this structure to feed to Intel's "lcall" instruction.
41533 */
41534- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
41535+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
41536 lguest_entry.segment = LGUEST_CS;
41537
41538 /*
41539diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
41540index 40634b0..4f5855e 100644
41541--- a/drivers/lguest/x86/switcher_32.S
41542+++ b/drivers/lguest/x86/switcher_32.S
41543@@ -87,6 +87,7 @@
41544 #include <asm/page.h>
41545 #include <asm/segment.h>
41546 #include <asm/lguest.h>
41547+#include <asm/processor-flags.h>
41548
41549 // We mark the start of the code to copy
41550 // It's placed in .text tho it's never run here
41551@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
41552 // Changes type when we load it: damn Intel!
41553 // For after we switch over our page tables
41554 // That entry will be read-only: we'd crash.
41555+
41556+#ifdef CONFIG_PAX_KERNEXEC
41557+ mov %cr0, %edx
41558+ xor $X86_CR0_WP, %edx
41559+ mov %edx, %cr0
41560+#endif
41561+
41562 movl $(GDT_ENTRY_TSS*8), %edx
41563 ltr %dx
41564
41565@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
41566 // Let's clear it again for our return.
41567 // The GDT descriptor of the Host
41568 // Points to the table after two "size" bytes
41569- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
41570+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
41571 // Clear "used" from type field (byte 5, bit 2)
41572- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
41573+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
41574+
41575+#ifdef CONFIG_PAX_KERNEXEC
41576+ mov %cr0, %eax
41577+ xor $X86_CR0_WP, %eax
41578+ mov %eax, %cr0
41579+#endif
41580
41581 // Once our page table's switched, the Guest is live!
41582 // The Host fades as we run this final step.
41583@@ -295,13 +309,12 @@ deliver_to_host:
41584 // I consulted gcc, and it gave
41585 // These instructions, which I gladly credit:
41586 leal (%edx,%ebx,8), %eax
41587- movzwl (%eax),%edx
41588- movl 4(%eax), %eax
41589- xorw %ax, %ax
41590- orl %eax, %edx
41591+ movl 4(%eax), %edx
41592+ movw (%eax), %dx
41593 // Now the address of the handler's in %edx
41594 // We call it now: its "iret" drops us home.
41595- jmp *%edx
41596+ ljmp $__KERNEL_CS, $1f
41597+1: jmp *%edx
41598
41599 // Every interrupt can come to us here
41600 // But we must truly tell each apart.
41601diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
41602index 0003992..854bbce 100644
41603--- a/drivers/md/bcache/closure.h
41604+++ b/drivers/md/bcache/closure.h
41605@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
41606 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
41607 struct workqueue_struct *wq)
41608 {
41609- BUG_ON(object_is_on_stack(cl));
41610+ BUG_ON(object_starts_on_stack(cl));
41611 closure_set_ip(cl);
41612 cl->fn = fn;
41613 cl->wq = wq;
41614diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
41615index 5a2c754..0fa55db 100644
41616--- a/drivers/md/bitmap.c
41617+++ b/drivers/md/bitmap.c
41618@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
41619 chunk_kb ? "KB" : "B");
41620 if (bitmap->storage.file) {
41621 seq_printf(seq, ", file: ");
41622- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
41623+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
41624 }
41625
41626 seq_printf(seq, "\n");
41627diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
41628index 81a79b7..87a0f73 100644
41629--- a/drivers/md/dm-ioctl.c
41630+++ b/drivers/md/dm-ioctl.c
41631@@ -1697,7 +1697,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
41632 cmd == DM_LIST_VERSIONS_CMD)
41633 return 0;
41634
41635- if ((cmd == DM_DEV_CREATE_CMD)) {
41636+ if (cmd == DM_DEV_CREATE_CMD) {
41637 if (!*param->name) {
41638 DMWARN("name not supplied when creating device");
41639 return -EINVAL;
41640diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
41641index 699b5be..eac0a15 100644
41642--- a/drivers/md/dm-raid1.c
41643+++ b/drivers/md/dm-raid1.c
41644@@ -40,7 +40,7 @@ enum dm_raid1_error {
41645
41646 struct mirror {
41647 struct mirror_set *ms;
41648- atomic_t error_count;
41649+ atomic_unchecked_t error_count;
41650 unsigned long error_type;
41651 struct dm_dev *dev;
41652 sector_t offset;
41653@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
41654 struct mirror *m;
41655
41656 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
41657- if (!atomic_read(&m->error_count))
41658+ if (!atomic_read_unchecked(&m->error_count))
41659 return m;
41660
41661 return NULL;
41662@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
41663 * simple way to tell if a device has encountered
41664 * errors.
41665 */
41666- atomic_inc(&m->error_count);
41667+ atomic_inc_unchecked(&m->error_count);
41668
41669 if (test_and_set_bit(error_type, &m->error_type))
41670 return;
41671@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
41672 struct mirror *m = get_default_mirror(ms);
41673
41674 do {
41675- if (likely(!atomic_read(&m->error_count)))
41676+ if (likely(!atomic_read_unchecked(&m->error_count)))
41677 return m;
41678
41679 if (m-- == ms->mirror)
41680@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
41681 {
41682 struct mirror *default_mirror = get_default_mirror(m->ms);
41683
41684- return !atomic_read(&default_mirror->error_count);
41685+ return !atomic_read_unchecked(&default_mirror->error_count);
41686 }
41687
41688 static int mirror_available(struct mirror_set *ms, struct bio *bio)
41689@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
41690 */
41691 if (likely(region_in_sync(ms, region, 1)))
41692 m = choose_mirror(ms, bio->bi_sector);
41693- else if (m && atomic_read(&m->error_count))
41694+ else if (m && atomic_read_unchecked(&m->error_count))
41695 m = NULL;
41696
41697 if (likely(m))
41698@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
41699 }
41700
41701 ms->mirror[mirror].ms = ms;
41702- atomic_set(&(ms->mirror[mirror].error_count), 0);
41703+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
41704 ms->mirror[mirror].error_type = 0;
41705 ms->mirror[mirror].offset = offset;
41706
41707@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
41708 */
41709 static char device_status_char(struct mirror *m)
41710 {
41711- if (!atomic_read(&(m->error_count)))
41712+ if (!atomic_read_unchecked(&(m->error_count)))
41713 return 'A';
41714
41715 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
41716diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
41717index d907ca6..cfb8384 100644
41718--- a/drivers/md/dm-stripe.c
41719+++ b/drivers/md/dm-stripe.c
41720@@ -20,7 +20,7 @@ struct stripe {
41721 struct dm_dev *dev;
41722 sector_t physical_start;
41723
41724- atomic_t error_count;
41725+ atomic_unchecked_t error_count;
41726 };
41727
41728 struct stripe_c {
41729@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
41730 kfree(sc);
41731 return r;
41732 }
41733- atomic_set(&(sc->stripe[i].error_count), 0);
41734+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
41735 }
41736
41737 ti->private = sc;
41738@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
41739 DMEMIT("%d ", sc->stripes);
41740 for (i = 0; i < sc->stripes; i++) {
41741 DMEMIT("%s ", sc->stripe[i].dev->name);
41742- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
41743+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
41744 'D' : 'A';
41745 }
41746 buffer[i] = '\0';
41747@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
41748 */
41749 for (i = 0; i < sc->stripes; i++)
41750 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
41751- atomic_inc(&(sc->stripe[i].error_count));
41752- if (atomic_read(&(sc->stripe[i].error_count)) <
41753+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
41754+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
41755 DM_IO_ERROR_THRESHOLD)
41756 schedule_work(&sc->trigger_event);
41757 }
41758diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
41759index 1ff252a..ee384c1 100644
41760--- a/drivers/md/dm-table.c
41761+++ b/drivers/md/dm-table.c
41762@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
41763 if (!dev_size)
41764 return 0;
41765
41766- if ((start >= dev_size) || (start + len > dev_size)) {
41767+ if ((start >= dev_size) || (len > dev_size - start)) {
41768 DMWARN("%s: %s too small for target: "
41769 "start=%llu, len=%llu, dev_size=%llu",
41770 dm_device_name(ti->table->md), bdevname(bdev, b),
41771diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
41772index 60bce43..9b997d0 100644
41773--- a/drivers/md/dm-thin-metadata.c
41774+++ b/drivers/md/dm-thin-metadata.c
41775@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
41776 {
41777 pmd->info.tm = pmd->tm;
41778 pmd->info.levels = 2;
41779- pmd->info.value_type.context = pmd->data_sm;
41780+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
41781 pmd->info.value_type.size = sizeof(__le64);
41782 pmd->info.value_type.inc = data_block_inc;
41783 pmd->info.value_type.dec = data_block_dec;
41784@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
41785
41786 pmd->bl_info.tm = pmd->tm;
41787 pmd->bl_info.levels = 1;
41788- pmd->bl_info.value_type.context = pmd->data_sm;
41789+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
41790 pmd->bl_info.value_type.size = sizeof(__le64);
41791 pmd->bl_info.value_type.inc = data_block_inc;
41792 pmd->bl_info.value_type.dec = data_block_dec;
41793diff --git a/drivers/md/dm.c b/drivers/md/dm.c
41794index 33f2010..23fb84c 100644
41795--- a/drivers/md/dm.c
41796+++ b/drivers/md/dm.c
41797@@ -169,9 +169,9 @@ struct mapped_device {
41798 /*
41799 * Event handling.
41800 */
41801- atomic_t event_nr;
41802+ atomic_unchecked_t event_nr;
41803 wait_queue_head_t eventq;
41804- atomic_t uevent_seq;
41805+ atomic_unchecked_t uevent_seq;
41806 struct list_head uevent_list;
41807 spinlock_t uevent_lock; /* Protect access to uevent_list */
41808
41809@@ -1884,8 +1884,8 @@ static struct mapped_device *alloc_dev(int minor)
41810 rwlock_init(&md->map_lock);
41811 atomic_set(&md->holders, 1);
41812 atomic_set(&md->open_count, 0);
41813- atomic_set(&md->event_nr, 0);
41814- atomic_set(&md->uevent_seq, 0);
41815+ atomic_set_unchecked(&md->event_nr, 0);
41816+ atomic_set_unchecked(&md->uevent_seq, 0);
41817 INIT_LIST_HEAD(&md->uevent_list);
41818 spin_lock_init(&md->uevent_lock);
41819
41820@@ -2033,7 +2033,7 @@ static void event_callback(void *context)
41821
41822 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
41823
41824- atomic_inc(&md->event_nr);
41825+ atomic_inc_unchecked(&md->event_nr);
41826 wake_up(&md->eventq);
41827 }
41828
41829@@ -2690,18 +2690,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
41830
41831 uint32_t dm_next_uevent_seq(struct mapped_device *md)
41832 {
41833- return atomic_add_return(1, &md->uevent_seq);
41834+ return atomic_add_return_unchecked(1, &md->uevent_seq);
41835 }
41836
41837 uint32_t dm_get_event_nr(struct mapped_device *md)
41838 {
41839- return atomic_read(&md->event_nr);
41840+ return atomic_read_unchecked(&md->event_nr);
41841 }
41842
41843 int dm_wait_event(struct mapped_device *md, int event_nr)
41844 {
41845 return wait_event_interruptible(md->eventq,
41846- (event_nr != atomic_read(&md->event_nr)));
41847+ (event_nr != atomic_read_unchecked(&md->event_nr)));
41848 }
41849
41850 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
41851diff --git a/drivers/md/md.c b/drivers/md/md.c
41852index 51f0345..c77810e 100644
41853--- a/drivers/md/md.c
41854+++ b/drivers/md/md.c
41855@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
41856 * start build, activate spare
41857 */
41858 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
41859-static atomic_t md_event_count;
41860+static atomic_unchecked_t md_event_count;
41861 void md_new_event(struct mddev *mddev)
41862 {
41863- atomic_inc(&md_event_count);
41864+ atomic_inc_unchecked(&md_event_count);
41865 wake_up(&md_event_waiters);
41866 }
41867 EXPORT_SYMBOL_GPL(md_new_event);
41868@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
41869 */
41870 static void md_new_event_inintr(struct mddev *mddev)
41871 {
41872- atomic_inc(&md_event_count);
41873+ atomic_inc_unchecked(&md_event_count);
41874 wake_up(&md_event_waiters);
41875 }
41876
41877@@ -1501,7 +1501,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
41878 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
41879 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
41880 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
41881- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
41882+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
41883
41884 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
41885 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
41886@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
41887 else
41888 sb->resync_offset = cpu_to_le64(0);
41889
41890- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
41891+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
41892
41893 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
41894 sb->size = cpu_to_le64(mddev->dev_sectors);
41895@@ -2750,7 +2750,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
41896 static ssize_t
41897 errors_show(struct md_rdev *rdev, char *page)
41898 {
41899- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
41900+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
41901 }
41902
41903 static ssize_t
41904@@ -2759,7 +2759,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
41905 char *e;
41906 unsigned long n = simple_strtoul(buf, &e, 10);
41907 if (*buf && (*e == 0 || *e == '\n')) {
41908- atomic_set(&rdev->corrected_errors, n);
41909+ atomic_set_unchecked(&rdev->corrected_errors, n);
41910 return len;
41911 }
41912 return -EINVAL;
41913@@ -3207,8 +3207,8 @@ int md_rdev_init(struct md_rdev *rdev)
41914 rdev->sb_loaded = 0;
41915 rdev->bb_page = NULL;
41916 atomic_set(&rdev->nr_pending, 0);
41917- atomic_set(&rdev->read_errors, 0);
41918- atomic_set(&rdev->corrected_errors, 0);
41919+ atomic_set_unchecked(&rdev->read_errors, 0);
41920+ atomic_set_unchecked(&rdev->corrected_errors, 0);
41921
41922 INIT_LIST_HEAD(&rdev->same_set);
41923 init_waitqueue_head(&rdev->blocked_wait);
41924@@ -7009,7 +7009,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
41925
41926 spin_unlock(&pers_lock);
41927 seq_printf(seq, "\n");
41928- seq->poll_event = atomic_read(&md_event_count);
41929+ seq->poll_event = atomic_read_unchecked(&md_event_count);
41930 return 0;
41931 }
41932 if (v == (void*)2) {
41933@@ -7112,7 +7112,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
41934 return error;
41935
41936 seq = file->private_data;
41937- seq->poll_event = atomic_read(&md_event_count);
41938+ seq->poll_event = atomic_read_unchecked(&md_event_count);
41939 return error;
41940 }
41941
41942@@ -7126,7 +7126,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
41943 /* always allow read */
41944 mask = POLLIN | POLLRDNORM;
41945
41946- if (seq->poll_event != atomic_read(&md_event_count))
41947+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
41948 mask |= POLLERR | POLLPRI;
41949 return mask;
41950 }
41951@@ -7170,7 +7170,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
41952 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
41953 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
41954 (int)part_stat_read(&disk->part0, sectors[1]) -
41955- atomic_read(&disk->sync_io);
41956+ atomic_read_unchecked(&disk->sync_io);
41957 /* sync IO will cause sync_io to increase before the disk_stats
41958 * as sync_io is counted when a request starts, and
41959 * disk_stats is counted when it completes.
41960diff --git a/drivers/md/md.h b/drivers/md/md.h
41961index 653f992b6..6af6c40 100644
41962--- a/drivers/md/md.h
41963+++ b/drivers/md/md.h
41964@@ -94,13 +94,13 @@ struct md_rdev {
41965 * only maintained for arrays that
41966 * support hot removal
41967 */
41968- atomic_t read_errors; /* number of consecutive read errors that
41969+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
41970 * we have tried to ignore.
41971 */
41972 struct timespec last_read_error; /* monotonic time since our
41973 * last read error
41974 */
41975- atomic_t corrected_errors; /* number of corrected read errors,
41976+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
41977 * for reporting to userspace and storing
41978 * in superblock.
41979 */
41980@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
41981
41982 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
41983 {
41984- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
41985+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
41986 }
41987
41988 struct md_personality
41989diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
41990index 3e6d115..ffecdeb 100644
41991--- a/drivers/md/persistent-data/dm-space-map.h
41992+++ b/drivers/md/persistent-data/dm-space-map.h
41993@@ -71,6 +71,7 @@ struct dm_space_map {
41994 dm_sm_threshold_fn fn,
41995 void *context);
41996 };
41997+typedef struct dm_space_map __no_const dm_space_map_no_const;
41998
41999 /*----------------------------------------------------------------*/
42000
42001diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
42002index 6f48244..7d29145 100644
42003--- a/drivers/md/raid1.c
42004+++ b/drivers/md/raid1.c
42005@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
42006 if (r1_sync_page_io(rdev, sect, s,
42007 bio->bi_io_vec[idx].bv_page,
42008 READ) != 0)
42009- atomic_add(s, &rdev->corrected_errors);
42010+ atomic_add_unchecked(s, &rdev->corrected_errors);
42011 }
42012 sectors -= s;
42013 sect += s;
42014@@ -2049,7 +2049,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
42015 test_bit(In_sync, &rdev->flags)) {
42016 if (r1_sync_page_io(rdev, sect, s,
42017 conf->tmppage, READ)) {
42018- atomic_add(s, &rdev->corrected_errors);
42019+ atomic_add_unchecked(s, &rdev->corrected_errors);
42020 printk(KERN_INFO
42021 "md/raid1:%s: read error corrected "
42022 "(%d sectors at %llu on %s)\n",
42023diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
42024index 081bb33..3c4b287 100644
42025--- a/drivers/md/raid10.c
42026+++ b/drivers/md/raid10.c
42027@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
42028 /* The write handler will notice the lack of
42029 * R10BIO_Uptodate and record any errors etc
42030 */
42031- atomic_add(r10_bio->sectors,
42032+ atomic_add_unchecked(r10_bio->sectors,
42033 &conf->mirrors[d].rdev->corrected_errors);
42034
42035 /* for reconstruct, we always reschedule after a read.
42036@@ -2298,7 +2298,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42037 {
42038 struct timespec cur_time_mon;
42039 unsigned long hours_since_last;
42040- unsigned int read_errors = atomic_read(&rdev->read_errors);
42041+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
42042
42043 ktime_get_ts(&cur_time_mon);
42044
42045@@ -2320,9 +2320,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42046 * overflowing the shift of read_errors by hours_since_last.
42047 */
42048 if (hours_since_last >= 8 * sizeof(read_errors))
42049- atomic_set(&rdev->read_errors, 0);
42050+ atomic_set_unchecked(&rdev->read_errors, 0);
42051 else
42052- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
42053+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
42054 }
42055
42056 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
42057@@ -2376,8 +2376,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42058 return;
42059
42060 check_decay_read_errors(mddev, rdev);
42061- atomic_inc(&rdev->read_errors);
42062- if (atomic_read(&rdev->read_errors) > max_read_errors) {
42063+ atomic_inc_unchecked(&rdev->read_errors);
42064+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
42065 char b[BDEVNAME_SIZE];
42066 bdevname(rdev->bdev, b);
42067
42068@@ -2385,7 +2385,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42069 "md/raid10:%s: %s: Raid device exceeded "
42070 "read_error threshold [cur %d:max %d]\n",
42071 mdname(mddev), b,
42072- atomic_read(&rdev->read_errors), max_read_errors);
42073+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
42074 printk(KERN_NOTICE
42075 "md/raid10:%s: %s: Failing raid device\n",
42076 mdname(mddev), b);
42077@@ -2540,7 +2540,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42078 sect +
42079 choose_data_offset(r10_bio, rdev)),
42080 bdevname(rdev->bdev, b));
42081- atomic_add(s, &rdev->corrected_errors);
42082+ atomic_add_unchecked(s, &rdev->corrected_errors);
42083 }
42084
42085 rdev_dec_pending(rdev, mddev);
42086diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
42087index a35b846..e295c6d 100644
42088--- a/drivers/md/raid5.c
42089+++ b/drivers/md/raid5.c
42090@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
42091 mdname(conf->mddev), STRIPE_SECTORS,
42092 (unsigned long long)s,
42093 bdevname(rdev->bdev, b));
42094- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
42095+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
42096 clear_bit(R5_ReadError, &sh->dev[i].flags);
42097 clear_bit(R5_ReWrite, &sh->dev[i].flags);
42098 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
42099 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
42100
42101- if (atomic_read(&rdev->read_errors))
42102- atomic_set(&rdev->read_errors, 0);
42103+ if (atomic_read_unchecked(&rdev->read_errors))
42104+ atomic_set_unchecked(&rdev->read_errors, 0);
42105 } else {
42106 const char *bdn = bdevname(rdev->bdev, b);
42107 int retry = 0;
42108 int set_bad = 0;
42109
42110 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
42111- atomic_inc(&rdev->read_errors);
42112+ atomic_inc_unchecked(&rdev->read_errors);
42113 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
42114 printk_ratelimited(
42115 KERN_WARNING
42116@@ -1806,7 +1806,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
42117 mdname(conf->mddev),
42118 (unsigned long long)s,
42119 bdn);
42120- } else if (atomic_read(&rdev->read_errors)
42121+ } else if (atomic_read_unchecked(&rdev->read_errors)
42122 > conf->max_nr_stripes)
42123 printk(KERN_WARNING
42124 "md/raid:%s: Too many read errors, failing device %s.\n",
42125diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
42126index 401ef64..836e563 100644
42127--- a/drivers/media/dvb-core/dvbdev.c
42128+++ b/drivers/media/dvb-core/dvbdev.c
42129@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
42130 const struct dvb_device *template, void *priv, int type)
42131 {
42132 struct dvb_device *dvbdev;
42133- struct file_operations *dvbdevfops;
42134+ file_operations_no_const *dvbdevfops;
42135 struct device *clsdev;
42136 int minor;
42137 int id;
42138diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
42139index 9b6c3bb..baeb5c7 100644
42140--- a/drivers/media/dvb-frontends/dib3000.h
42141+++ b/drivers/media/dvb-frontends/dib3000.h
42142@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
42143 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
42144 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
42145 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
42146-};
42147+} __no_const;
42148
42149 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
42150 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
42151diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
42152index c7a9be1..683f6f8 100644
42153--- a/drivers/media/pci/cx88/cx88-video.c
42154+++ b/drivers/media/pci/cx88/cx88-video.c
42155@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
42156
42157 /* ------------------------------------------------------------------ */
42158
42159-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42160-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42161-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42162+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42163+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42164+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42165
42166 module_param_array(video_nr, int, NULL, 0444);
42167 module_param_array(vbi_nr, int, NULL, 0444);
42168diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
42169index d338b19..aae4f9e 100644
42170--- a/drivers/media/platform/omap/omap_vout.c
42171+++ b/drivers/media/platform/omap/omap_vout.c
42172@@ -63,7 +63,6 @@ enum omap_vout_channels {
42173 OMAP_VIDEO2,
42174 };
42175
42176-static struct videobuf_queue_ops video_vbq_ops;
42177 /* Variables configurable through module params*/
42178 static u32 video1_numbuffers = 3;
42179 static u32 video2_numbuffers = 3;
42180@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
42181 {
42182 struct videobuf_queue *q;
42183 struct omap_vout_device *vout = NULL;
42184+ static struct videobuf_queue_ops video_vbq_ops = {
42185+ .buf_setup = omap_vout_buffer_setup,
42186+ .buf_prepare = omap_vout_buffer_prepare,
42187+ .buf_release = omap_vout_buffer_release,
42188+ .buf_queue = omap_vout_buffer_queue,
42189+ };
42190
42191 vout = video_drvdata(file);
42192 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
42193@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
42194 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
42195
42196 q = &vout->vbq;
42197- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
42198- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
42199- video_vbq_ops.buf_release = omap_vout_buffer_release;
42200- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
42201 spin_lock_init(&vout->vbq_lock);
42202
42203 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
42204diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
42205index 04e6490..2df65bf 100644
42206--- a/drivers/media/platform/s5p-tv/mixer.h
42207+++ b/drivers/media/platform/s5p-tv/mixer.h
42208@@ -156,7 +156,7 @@ struct mxr_layer {
42209 /** layer index (unique identifier) */
42210 int idx;
42211 /** callbacks for layer methods */
42212- struct mxr_layer_ops ops;
42213+ struct mxr_layer_ops *ops;
42214 /** format array */
42215 const struct mxr_format **fmt_array;
42216 /** size of format array */
42217diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42218index b93a21f..2535195 100644
42219--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42220+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42221@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
42222 {
42223 struct mxr_layer *layer;
42224 int ret;
42225- struct mxr_layer_ops ops = {
42226+ static struct mxr_layer_ops ops = {
42227 .release = mxr_graph_layer_release,
42228 .buffer_set = mxr_graph_buffer_set,
42229 .stream_set = mxr_graph_stream_set,
42230diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
42231index b713403..53cb5ad 100644
42232--- a/drivers/media/platform/s5p-tv/mixer_reg.c
42233+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
42234@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
42235 layer->update_buf = next;
42236 }
42237
42238- layer->ops.buffer_set(layer, layer->update_buf);
42239+ layer->ops->buffer_set(layer, layer->update_buf);
42240
42241 if (done && done != layer->shadow_buf)
42242 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
42243diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
42244index ef0efdf..8c78eb6 100644
42245--- a/drivers/media/platform/s5p-tv/mixer_video.c
42246+++ b/drivers/media/platform/s5p-tv/mixer_video.c
42247@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
42248 layer->geo.src.height = layer->geo.src.full_height;
42249
42250 mxr_geometry_dump(mdev, &layer->geo);
42251- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42252+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42253 mxr_geometry_dump(mdev, &layer->geo);
42254 }
42255
42256@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
42257 layer->geo.dst.full_width = mbus_fmt.width;
42258 layer->geo.dst.full_height = mbus_fmt.height;
42259 layer->geo.dst.field = mbus_fmt.field;
42260- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42261+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42262
42263 mxr_geometry_dump(mdev, &layer->geo);
42264 }
42265@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
42266 /* set source size to highest accepted value */
42267 geo->src.full_width = max(geo->dst.full_width, pix->width);
42268 geo->src.full_height = max(geo->dst.full_height, pix->height);
42269- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42270+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42271 mxr_geometry_dump(mdev, &layer->geo);
42272 /* set cropping to total visible screen */
42273 geo->src.width = pix->width;
42274@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
42275 geo->src.x_offset = 0;
42276 geo->src.y_offset = 0;
42277 /* assure consistency of geometry */
42278- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42279+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42280 mxr_geometry_dump(mdev, &layer->geo);
42281 /* set full size to lowest possible value */
42282 geo->src.full_width = 0;
42283 geo->src.full_height = 0;
42284- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42285+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42286 mxr_geometry_dump(mdev, &layer->geo);
42287
42288 /* returning results */
42289@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
42290 target->width = s->r.width;
42291 target->height = s->r.height;
42292
42293- layer->ops.fix_geometry(layer, stage, s->flags);
42294+ layer->ops->fix_geometry(layer, stage, s->flags);
42295
42296 /* retrieve update selection rectangle */
42297 res.left = target->x_offset;
42298@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
42299 mxr_output_get(mdev);
42300
42301 mxr_layer_update_output(layer);
42302- layer->ops.format_set(layer);
42303+ layer->ops->format_set(layer);
42304 /* enabling layer in hardware */
42305 spin_lock_irqsave(&layer->enq_slock, flags);
42306 layer->state = MXR_LAYER_STREAMING;
42307 spin_unlock_irqrestore(&layer->enq_slock, flags);
42308
42309- layer->ops.stream_set(layer, MXR_ENABLE);
42310+ layer->ops->stream_set(layer, MXR_ENABLE);
42311 mxr_streamer_get(mdev);
42312
42313 return 0;
42314@@ -1030,7 +1030,7 @@ static int stop_streaming(struct vb2_queue *vq)
42315 spin_unlock_irqrestore(&layer->enq_slock, flags);
42316
42317 /* disabling layer in hardware */
42318- layer->ops.stream_set(layer, MXR_DISABLE);
42319+ layer->ops->stream_set(layer, MXR_DISABLE);
42320 /* remove one streamer */
42321 mxr_streamer_put(mdev);
42322 /* allow changes in output configuration */
42323@@ -1069,8 +1069,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
42324
42325 void mxr_layer_release(struct mxr_layer *layer)
42326 {
42327- if (layer->ops.release)
42328- layer->ops.release(layer);
42329+ if (layer->ops->release)
42330+ layer->ops->release(layer);
42331 }
42332
42333 void mxr_base_layer_release(struct mxr_layer *layer)
42334@@ -1096,7 +1096,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
42335
42336 layer->mdev = mdev;
42337 layer->idx = idx;
42338- layer->ops = *ops;
42339+ layer->ops = ops;
42340
42341 spin_lock_init(&layer->enq_slock);
42342 INIT_LIST_HEAD(&layer->enq_list);
42343diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42344index 3d13a63..da31bf1 100644
42345--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42346+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42347@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
42348 {
42349 struct mxr_layer *layer;
42350 int ret;
42351- struct mxr_layer_ops ops = {
42352+ static struct mxr_layer_ops ops = {
42353 .release = mxr_vp_layer_release,
42354 .buffer_set = mxr_vp_buffer_set,
42355 .stream_set = mxr_vp_stream_set,
42356diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
42357index 545c04c..a14bded 100644
42358--- a/drivers/media/radio/radio-cadet.c
42359+++ b/drivers/media/radio/radio-cadet.c
42360@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42361 unsigned char readbuf[RDS_BUFFER];
42362 int i = 0;
42363
42364+ if (count > RDS_BUFFER)
42365+ return -EFAULT;
42366 mutex_lock(&dev->lock);
42367 if (dev->rdsstat == 0)
42368 cadet_start_rds(dev);
42369@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42370 while (i < count && dev->rdsin != dev->rdsout)
42371 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
42372
42373- if (i && copy_to_user(data, readbuf, i))
42374+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
42375 i = -EFAULT;
42376 unlock:
42377 mutex_unlock(&dev->lock);
42378diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
42379index 3940bb0..fb3952a 100644
42380--- a/drivers/media/usb/dvb-usb/cxusb.c
42381+++ b/drivers/media/usb/dvb-usb/cxusb.c
42382@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
42383
42384 struct dib0700_adapter_state {
42385 int (*set_param_save) (struct dvb_frontend *);
42386-};
42387+} __no_const;
42388
42389 static int dib7070_set_param_override(struct dvb_frontend *fe)
42390 {
42391diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
42392index 6e237b6..dc25556 100644
42393--- a/drivers/media/usb/dvb-usb/dw2102.c
42394+++ b/drivers/media/usb/dvb-usb/dw2102.c
42395@@ -118,7 +118,7 @@ struct su3000_state {
42396
42397 struct s6x0_state {
42398 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
42399-};
42400+} __no_const;
42401
42402 /* debug */
42403 static int dvb_usb_dw2102_debug;
42404diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42405index f129551..ecf6514 100644
42406--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42407+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42408@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
42409 __u32 reserved;
42410 };
42411
42412-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42413+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
42414 enum v4l2_memory memory)
42415 {
42416 void __user *up_pln;
42417@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42418 return 0;
42419 }
42420
42421-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42422+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
42423 enum v4l2_memory memory)
42424 {
42425 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
42426@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
42427 put_user(kp->start_block, &up->start_block) ||
42428 put_user(kp->blocks, &up->blocks) ||
42429 put_user(tmp, &up->edid) ||
42430- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
42431+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
42432 return -EFAULT;
42433 return 0;
42434 }
42435diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
42436index 7658586..1079260 100644
42437--- a/drivers/media/v4l2-core/v4l2-ioctl.c
42438+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
42439@@ -1995,7 +1995,8 @@ struct v4l2_ioctl_info {
42440 struct file *file, void *fh, void *p);
42441 } u;
42442 void (*debug)(const void *arg, bool write_only);
42443-};
42444+} __do_const;
42445+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
42446
42447 /* This control needs a priority check */
42448 #define INFO_FL_PRIO (1 << 0)
42449@@ -2177,7 +2178,7 @@ static long __video_do_ioctl(struct file *file,
42450 struct video_device *vfd = video_devdata(file);
42451 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
42452 bool write_only = false;
42453- struct v4l2_ioctl_info default_info;
42454+ v4l2_ioctl_info_no_const default_info;
42455 const struct v4l2_ioctl_info *info;
42456 void *fh = file->private_data;
42457 struct v4l2_fh *vfh = NULL;
42458@@ -2251,7 +2252,7 @@ done:
42459 }
42460
42461 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42462- void * __user *user_ptr, void ***kernel_ptr)
42463+ void __user **user_ptr, void ***kernel_ptr)
42464 {
42465 int ret = 0;
42466
42467@@ -2267,7 +2268,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42468 ret = -EINVAL;
42469 break;
42470 }
42471- *user_ptr = (void __user *)buf->m.planes;
42472+ *user_ptr = (void __force_user *)buf->m.planes;
42473 *kernel_ptr = (void *)&buf->m.planes;
42474 *array_size = sizeof(struct v4l2_plane) * buf->length;
42475 ret = 1;
42476@@ -2302,7 +2303,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42477 ret = -EINVAL;
42478 break;
42479 }
42480- *user_ptr = (void __user *)ctrls->controls;
42481+ *user_ptr = (void __force_user *)ctrls->controls;
42482 *kernel_ptr = (void *)&ctrls->controls;
42483 *array_size = sizeof(struct v4l2_ext_control)
42484 * ctrls->count;
42485diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
42486index 767ff4d..c69d259 100644
42487--- a/drivers/message/fusion/mptbase.c
42488+++ b/drivers/message/fusion/mptbase.c
42489@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
42490 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
42491 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
42492
42493+#ifdef CONFIG_GRKERNSEC_HIDESYM
42494+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
42495+#else
42496 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
42497 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
42498+#endif
42499+
42500 /*
42501 * Rounding UP to nearest 4-kB boundary here...
42502 */
42503@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
42504 ioc->facts.GlobalCredits);
42505
42506 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
42507+#ifdef CONFIG_GRKERNSEC_HIDESYM
42508+ NULL, NULL);
42509+#else
42510 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
42511+#endif
42512 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
42513 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
42514 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
42515diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
42516index dd239bd..689c4f7 100644
42517--- a/drivers/message/fusion/mptsas.c
42518+++ b/drivers/message/fusion/mptsas.c
42519@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
42520 return 0;
42521 }
42522
42523+static inline void
42524+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
42525+{
42526+ if (phy_info->port_details) {
42527+ phy_info->port_details->rphy = rphy;
42528+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
42529+ ioc->name, rphy));
42530+ }
42531+
42532+ if (rphy) {
42533+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
42534+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
42535+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
42536+ ioc->name, rphy, rphy->dev.release));
42537+ }
42538+}
42539+
42540 /* no mutex */
42541 static void
42542 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
42543@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
42544 return NULL;
42545 }
42546
42547-static inline void
42548-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
42549-{
42550- if (phy_info->port_details) {
42551- phy_info->port_details->rphy = rphy;
42552- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
42553- ioc->name, rphy));
42554- }
42555-
42556- if (rphy) {
42557- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
42558- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
42559- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
42560- ioc->name, rphy, rphy->dev.release));
42561- }
42562-}
42563-
42564 static inline struct sas_port *
42565 mptsas_get_port(struct mptsas_phyinfo *phy_info)
42566 {
42567diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
42568index 727819c..ad74694 100644
42569--- a/drivers/message/fusion/mptscsih.c
42570+++ b/drivers/message/fusion/mptscsih.c
42571@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
42572
42573 h = shost_priv(SChost);
42574
42575- if (h) {
42576- if (h->info_kbuf == NULL)
42577- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
42578- return h->info_kbuf;
42579- h->info_kbuf[0] = '\0';
42580+ if (!h)
42581+ return NULL;
42582
42583- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
42584- h->info_kbuf[size-1] = '\0';
42585- }
42586+ if (h->info_kbuf == NULL)
42587+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
42588+ return h->info_kbuf;
42589+ h->info_kbuf[0] = '\0';
42590+
42591+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
42592+ h->info_kbuf[size-1] = '\0';
42593
42594 return h->info_kbuf;
42595 }
42596diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
42597index b7d87cd..9890039 100644
42598--- a/drivers/message/i2o/i2o_proc.c
42599+++ b/drivers/message/i2o/i2o_proc.c
42600@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
42601 "Array Controller Device"
42602 };
42603
42604-static char *chtostr(char *tmp, u8 *chars, int n)
42605-{
42606- tmp[0] = 0;
42607- return strncat(tmp, (char *)chars, n);
42608-}
42609-
42610 static int i2o_report_query_status(struct seq_file *seq, int block_status,
42611 char *group)
42612 {
42613@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
42614 } *result;
42615
42616 i2o_exec_execute_ddm_table ddm_table;
42617- char tmp[28 + 1];
42618
42619 result = kmalloc(sizeof(*result), GFP_KERNEL);
42620 if (!result)
42621@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
42622
42623 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
42624 seq_printf(seq, "%-#8x", ddm_table.module_id);
42625- seq_printf(seq, "%-29s",
42626- chtostr(tmp, ddm_table.module_name_version, 28));
42627+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
42628 seq_printf(seq, "%9d ", ddm_table.data_size);
42629 seq_printf(seq, "%8d", ddm_table.code_size);
42630
42631@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
42632
42633 i2o_driver_result_table *result;
42634 i2o_driver_store_table *dst;
42635- char tmp[28 + 1];
42636
42637 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
42638 if (result == NULL)
42639@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
42640
42641 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
42642 seq_printf(seq, "%-#8x", dst->module_id);
42643- seq_printf(seq, "%-29s",
42644- chtostr(tmp, dst->module_name_version, 28));
42645- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
42646+ seq_printf(seq, "%-.28s", dst->module_name_version);
42647+ seq_printf(seq, "%-.8s", dst->date);
42648 seq_printf(seq, "%8d ", dst->module_size);
42649 seq_printf(seq, "%8d ", dst->mpb_size);
42650 seq_printf(seq, "0x%04x", dst->module_flags);
42651@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
42652 // == (allow) 512d bytes (max)
42653 static u16 *work16 = (u16 *) work32;
42654 int token;
42655- char tmp[16 + 1];
42656
42657 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
42658
42659@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
42660 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
42661 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
42662 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
42663- seq_printf(seq, "Vendor info : %s\n",
42664- chtostr(tmp, (u8 *) (work32 + 2), 16));
42665- seq_printf(seq, "Product info : %s\n",
42666- chtostr(tmp, (u8 *) (work32 + 6), 16));
42667- seq_printf(seq, "Description : %s\n",
42668- chtostr(tmp, (u8 *) (work32 + 10), 16));
42669- seq_printf(seq, "Product rev. : %s\n",
42670- chtostr(tmp, (u8 *) (work32 + 14), 8));
42671+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
42672+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
42673+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
42674+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
42675
42676 seq_printf(seq, "Serial number : ");
42677 print_serial_number(seq, (u8 *) (work32 + 16),
42678@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
42679 u8 pad[256]; // allow up to 256 byte (max) serial number
42680 } result;
42681
42682- char tmp[24 + 1];
42683-
42684 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
42685
42686 if (token < 0) {
42687@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
42688 }
42689
42690 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
42691- seq_printf(seq, "Module name : %s\n",
42692- chtostr(tmp, result.module_name, 24));
42693- seq_printf(seq, "Module revision : %s\n",
42694- chtostr(tmp, result.module_rev, 8));
42695+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
42696+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
42697
42698 seq_printf(seq, "Serial number : ");
42699 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
42700@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
42701 u8 instance_number[4];
42702 } result;
42703
42704- char tmp[64 + 1];
42705-
42706 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
42707
42708 if (token < 0) {
42709@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
42710 return 0;
42711 }
42712
42713- seq_printf(seq, "Device name : %s\n",
42714- chtostr(tmp, result.device_name, 64));
42715- seq_printf(seq, "Service name : %s\n",
42716- chtostr(tmp, result.service_name, 64));
42717- seq_printf(seq, "Physical name : %s\n",
42718- chtostr(tmp, result.physical_location, 64));
42719- seq_printf(seq, "Instance number : %s\n",
42720- chtostr(tmp, result.instance_number, 4));
42721+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
42722+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
42723+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
42724+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
42725
42726 return 0;
42727 }
42728diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
42729index a8c08f3..155fe3d 100644
42730--- a/drivers/message/i2o/iop.c
42731+++ b/drivers/message/i2o/iop.c
42732@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
42733
42734 spin_lock_irqsave(&c->context_list_lock, flags);
42735
42736- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
42737- atomic_inc(&c->context_list_counter);
42738+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
42739+ atomic_inc_unchecked(&c->context_list_counter);
42740
42741- entry->context = atomic_read(&c->context_list_counter);
42742+ entry->context = atomic_read_unchecked(&c->context_list_counter);
42743
42744 list_add(&entry->list, &c->context_list);
42745
42746@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
42747
42748 #if BITS_PER_LONG == 64
42749 spin_lock_init(&c->context_list_lock);
42750- atomic_set(&c->context_list_counter, 0);
42751+ atomic_set_unchecked(&c->context_list_counter, 0);
42752 INIT_LIST_HEAD(&c->context_list);
42753 #endif
42754
42755diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
42756index 45ece11..8efa218 100644
42757--- a/drivers/mfd/janz-cmodio.c
42758+++ b/drivers/mfd/janz-cmodio.c
42759@@ -13,6 +13,7 @@
42760
42761 #include <linux/kernel.h>
42762 #include <linux/module.h>
42763+#include <linux/slab.h>
42764 #include <linux/init.h>
42765 #include <linux/pci.h>
42766 #include <linux/interrupt.h>
42767diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
42768index a5f9888..1c0ed56 100644
42769--- a/drivers/mfd/twl4030-irq.c
42770+++ b/drivers/mfd/twl4030-irq.c
42771@@ -35,6 +35,7 @@
42772 #include <linux/of.h>
42773 #include <linux/irqdomain.h>
42774 #include <linux/i2c/twl.h>
42775+#include <asm/pgtable.h>
42776
42777 #include "twl-core.h"
42778
42779@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
42780 * Install an irq handler for each of the SIH modules;
42781 * clone dummy irq_chip since PIH can't *do* anything
42782 */
42783- twl4030_irq_chip = dummy_irq_chip;
42784- twl4030_irq_chip.name = "twl4030";
42785+ pax_open_kernel();
42786+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
42787+ *(const char **)&twl4030_irq_chip.name = "twl4030";
42788
42789- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
42790+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
42791+ pax_close_kernel();
42792
42793 for (i = irq_base; i < irq_end; i++) {
42794 irq_set_chip_and_handler(i, &twl4030_irq_chip,
42795diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
42796index 277a8db..0e0b754 100644
42797--- a/drivers/mfd/twl6030-irq.c
42798+++ b/drivers/mfd/twl6030-irq.c
42799@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
42800 * install an irq handler for each of the modules;
42801 * clone dummy irq_chip since PIH can't *do* anything
42802 */
42803- twl6030_irq_chip = dummy_irq_chip;
42804- twl6030_irq_chip.name = "twl6030";
42805- twl6030_irq_chip.irq_set_type = NULL;
42806- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
42807+ pax_open_kernel();
42808+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
42809+ *(const char **)&twl6030_irq_chip.name = "twl6030";
42810+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
42811+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
42812+ pax_close_kernel();
42813
42814 for (i = irq_base; i < irq_end; i++) {
42815 irq_set_chip_and_handler(i, &twl6030_irq_chip,
42816diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
42817index f32550a..e3e52a2 100644
42818--- a/drivers/misc/c2port/core.c
42819+++ b/drivers/misc/c2port/core.c
42820@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
42821 mutex_init(&c2dev->mutex);
42822
42823 /* Create binary file */
42824- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
42825+ pax_open_kernel();
42826+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
42827+ pax_close_kernel();
42828 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
42829 if (unlikely(ret))
42830 goto error_device_create_bin_file;
42831diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
42832index 36f5d52..32311c3 100644
42833--- a/drivers/misc/kgdbts.c
42834+++ b/drivers/misc/kgdbts.c
42835@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
42836 char before[BREAK_INSTR_SIZE];
42837 char after[BREAK_INSTR_SIZE];
42838
42839- probe_kernel_read(before, (char *)kgdbts_break_test,
42840+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
42841 BREAK_INSTR_SIZE);
42842 init_simple_test();
42843 ts.tst = plant_and_detach_test;
42844@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
42845 /* Activate test with initial breakpoint */
42846 if (!is_early)
42847 kgdb_breakpoint();
42848- probe_kernel_read(after, (char *)kgdbts_break_test,
42849+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
42850 BREAK_INSTR_SIZE);
42851 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
42852 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
42853diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
42854index 4cd4a3d..b48cbc7 100644
42855--- a/drivers/misc/lis3lv02d/lis3lv02d.c
42856+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
42857@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
42858 * the lid is closed. This leads to interrupts as soon as a little move
42859 * is done.
42860 */
42861- atomic_inc(&lis3->count);
42862+ atomic_inc_unchecked(&lis3->count);
42863
42864 wake_up_interruptible(&lis3->misc_wait);
42865 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
42866@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
42867 if (lis3->pm_dev)
42868 pm_runtime_get_sync(lis3->pm_dev);
42869
42870- atomic_set(&lis3->count, 0);
42871+ atomic_set_unchecked(&lis3->count, 0);
42872 return 0;
42873 }
42874
42875@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
42876 add_wait_queue(&lis3->misc_wait, &wait);
42877 while (true) {
42878 set_current_state(TASK_INTERRUPTIBLE);
42879- data = atomic_xchg(&lis3->count, 0);
42880+ data = atomic_xchg_unchecked(&lis3->count, 0);
42881 if (data)
42882 break;
42883
42884@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
42885 struct lis3lv02d, miscdev);
42886
42887 poll_wait(file, &lis3->misc_wait, wait);
42888- if (atomic_read(&lis3->count))
42889+ if (atomic_read_unchecked(&lis3->count))
42890 return POLLIN | POLLRDNORM;
42891 return 0;
42892 }
42893diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
42894index c439c82..1f20f57 100644
42895--- a/drivers/misc/lis3lv02d/lis3lv02d.h
42896+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
42897@@ -297,7 +297,7 @@ struct lis3lv02d {
42898 struct input_polled_dev *idev; /* input device */
42899 struct platform_device *pdev; /* platform device */
42900 struct regulator_bulk_data regulators[2];
42901- atomic_t count; /* interrupt count after last read */
42902+ atomic_unchecked_t count; /* interrupt count after last read */
42903 union axis_conversion ac; /* hw -> logical axis */
42904 int mapped_btns[3];
42905
42906diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
42907index 2f30bad..c4c13d0 100644
42908--- a/drivers/misc/sgi-gru/gruhandles.c
42909+++ b/drivers/misc/sgi-gru/gruhandles.c
42910@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
42911 unsigned long nsec;
42912
42913 nsec = CLKS2NSEC(clks);
42914- atomic_long_inc(&mcs_op_statistics[op].count);
42915- atomic_long_add(nsec, &mcs_op_statistics[op].total);
42916+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
42917+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
42918 if (mcs_op_statistics[op].max < nsec)
42919 mcs_op_statistics[op].max = nsec;
42920 }
42921diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
42922index 797d796..ae8f01e 100644
42923--- a/drivers/misc/sgi-gru/gruprocfs.c
42924+++ b/drivers/misc/sgi-gru/gruprocfs.c
42925@@ -32,9 +32,9 @@
42926
42927 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
42928
42929-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
42930+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
42931 {
42932- unsigned long val = atomic_long_read(v);
42933+ unsigned long val = atomic_long_read_unchecked(v);
42934
42935 seq_printf(s, "%16lu %s\n", val, id);
42936 }
42937@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
42938
42939 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
42940 for (op = 0; op < mcsop_last; op++) {
42941- count = atomic_long_read(&mcs_op_statistics[op].count);
42942- total = atomic_long_read(&mcs_op_statistics[op].total);
42943+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
42944+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
42945 max = mcs_op_statistics[op].max;
42946 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
42947 count ? total / count : 0, max);
42948diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
42949index 5c3ce24..4915ccb 100644
42950--- a/drivers/misc/sgi-gru/grutables.h
42951+++ b/drivers/misc/sgi-gru/grutables.h
42952@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
42953 * GRU statistics.
42954 */
42955 struct gru_stats_s {
42956- atomic_long_t vdata_alloc;
42957- atomic_long_t vdata_free;
42958- atomic_long_t gts_alloc;
42959- atomic_long_t gts_free;
42960- atomic_long_t gms_alloc;
42961- atomic_long_t gms_free;
42962- atomic_long_t gts_double_allocate;
42963- atomic_long_t assign_context;
42964- atomic_long_t assign_context_failed;
42965- atomic_long_t free_context;
42966- atomic_long_t load_user_context;
42967- atomic_long_t load_kernel_context;
42968- atomic_long_t lock_kernel_context;
42969- atomic_long_t unlock_kernel_context;
42970- atomic_long_t steal_user_context;
42971- atomic_long_t steal_kernel_context;
42972- atomic_long_t steal_context_failed;
42973- atomic_long_t nopfn;
42974- atomic_long_t asid_new;
42975- atomic_long_t asid_next;
42976- atomic_long_t asid_wrap;
42977- atomic_long_t asid_reuse;
42978- atomic_long_t intr;
42979- atomic_long_t intr_cbr;
42980- atomic_long_t intr_tfh;
42981- atomic_long_t intr_spurious;
42982- atomic_long_t intr_mm_lock_failed;
42983- atomic_long_t call_os;
42984- atomic_long_t call_os_wait_queue;
42985- atomic_long_t user_flush_tlb;
42986- atomic_long_t user_unload_context;
42987- atomic_long_t user_exception;
42988- atomic_long_t set_context_option;
42989- atomic_long_t check_context_retarget_intr;
42990- atomic_long_t check_context_unload;
42991- atomic_long_t tlb_dropin;
42992- atomic_long_t tlb_preload_page;
42993- atomic_long_t tlb_dropin_fail_no_asid;
42994- atomic_long_t tlb_dropin_fail_upm;
42995- atomic_long_t tlb_dropin_fail_invalid;
42996- atomic_long_t tlb_dropin_fail_range_active;
42997- atomic_long_t tlb_dropin_fail_idle;
42998- atomic_long_t tlb_dropin_fail_fmm;
42999- atomic_long_t tlb_dropin_fail_no_exception;
43000- atomic_long_t tfh_stale_on_fault;
43001- atomic_long_t mmu_invalidate_range;
43002- atomic_long_t mmu_invalidate_page;
43003- atomic_long_t flush_tlb;
43004- atomic_long_t flush_tlb_gru;
43005- atomic_long_t flush_tlb_gru_tgh;
43006- atomic_long_t flush_tlb_gru_zero_asid;
43007+ atomic_long_unchecked_t vdata_alloc;
43008+ atomic_long_unchecked_t vdata_free;
43009+ atomic_long_unchecked_t gts_alloc;
43010+ atomic_long_unchecked_t gts_free;
43011+ atomic_long_unchecked_t gms_alloc;
43012+ atomic_long_unchecked_t gms_free;
43013+ atomic_long_unchecked_t gts_double_allocate;
43014+ atomic_long_unchecked_t assign_context;
43015+ atomic_long_unchecked_t assign_context_failed;
43016+ atomic_long_unchecked_t free_context;
43017+ atomic_long_unchecked_t load_user_context;
43018+ atomic_long_unchecked_t load_kernel_context;
43019+ atomic_long_unchecked_t lock_kernel_context;
43020+ atomic_long_unchecked_t unlock_kernel_context;
43021+ atomic_long_unchecked_t steal_user_context;
43022+ atomic_long_unchecked_t steal_kernel_context;
43023+ atomic_long_unchecked_t steal_context_failed;
43024+ atomic_long_unchecked_t nopfn;
43025+ atomic_long_unchecked_t asid_new;
43026+ atomic_long_unchecked_t asid_next;
43027+ atomic_long_unchecked_t asid_wrap;
43028+ atomic_long_unchecked_t asid_reuse;
43029+ atomic_long_unchecked_t intr;
43030+ atomic_long_unchecked_t intr_cbr;
43031+ atomic_long_unchecked_t intr_tfh;
43032+ atomic_long_unchecked_t intr_spurious;
43033+ atomic_long_unchecked_t intr_mm_lock_failed;
43034+ atomic_long_unchecked_t call_os;
43035+ atomic_long_unchecked_t call_os_wait_queue;
43036+ atomic_long_unchecked_t user_flush_tlb;
43037+ atomic_long_unchecked_t user_unload_context;
43038+ atomic_long_unchecked_t user_exception;
43039+ atomic_long_unchecked_t set_context_option;
43040+ atomic_long_unchecked_t check_context_retarget_intr;
43041+ atomic_long_unchecked_t check_context_unload;
43042+ atomic_long_unchecked_t tlb_dropin;
43043+ atomic_long_unchecked_t tlb_preload_page;
43044+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
43045+ atomic_long_unchecked_t tlb_dropin_fail_upm;
43046+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
43047+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
43048+ atomic_long_unchecked_t tlb_dropin_fail_idle;
43049+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
43050+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
43051+ atomic_long_unchecked_t tfh_stale_on_fault;
43052+ atomic_long_unchecked_t mmu_invalidate_range;
43053+ atomic_long_unchecked_t mmu_invalidate_page;
43054+ atomic_long_unchecked_t flush_tlb;
43055+ atomic_long_unchecked_t flush_tlb_gru;
43056+ atomic_long_unchecked_t flush_tlb_gru_tgh;
43057+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
43058
43059- atomic_long_t copy_gpa;
43060- atomic_long_t read_gpa;
43061+ atomic_long_unchecked_t copy_gpa;
43062+ atomic_long_unchecked_t read_gpa;
43063
43064- atomic_long_t mesq_receive;
43065- atomic_long_t mesq_receive_none;
43066- atomic_long_t mesq_send;
43067- atomic_long_t mesq_send_failed;
43068- atomic_long_t mesq_noop;
43069- atomic_long_t mesq_send_unexpected_error;
43070- atomic_long_t mesq_send_lb_overflow;
43071- atomic_long_t mesq_send_qlimit_reached;
43072- atomic_long_t mesq_send_amo_nacked;
43073- atomic_long_t mesq_send_put_nacked;
43074- atomic_long_t mesq_page_overflow;
43075- atomic_long_t mesq_qf_locked;
43076- atomic_long_t mesq_qf_noop_not_full;
43077- atomic_long_t mesq_qf_switch_head_failed;
43078- atomic_long_t mesq_qf_unexpected_error;
43079- atomic_long_t mesq_noop_unexpected_error;
43080- atomic_long_t mesq_noop_lb_overflow;
43081- atomic_long_t mesq_noop_qlimit_reached;
43082- atomic_long_t mesq_noop_amo_nacked;
43083- atomic_long_t mesq_noop_put_nacked;
43084- atomic_long_t mesq_noop_page_overflow;
43085+ atomic_long_unchecked_t mesq_receive;
43086+ atomic_long_unchecked_t mesq_receive_none;
43087+ atomic_long_unchecked_t mesq_send;
43088+ atomic_long_unchecked_t mesq_send_failed;
43089+ atomic_long_unchecked_t mesq_noop;
43090+ atomic_long_unchecked_t mesq_send_unexpected_error;
43091+ atomic_long_unchecked_t mesq_send_lb_overflow;
43092+ atomic_long_unchecked_t mesq_send_qlimit_reached;
43093+ atomic_long_unchecked_t mesq_send_amo_nacked;
43094+ atomic_long_unchecked_t mesq_send_put_nacked;
43095+ atomic_long_unchecked_t mesq_page_overflow;
43096+ atomic_long_unchecked_t mesq_qf_locked;
43097+ atomic_long_unchecked_t mesq_qf_noop_not_full;
43098+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
43099+ atomic_long_unchecked_t mesq_qf_unexpected_error;
43100+ atomic_long_unchecked_t mesq_noop_unexpected_error;
43101+ atomic_long_unchecked_t mesq_noop_lb_overflow;
43102+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
43103+ atomic_long_unchecked_t mesq_noop_amo_nacked;
43104+ atomic_long_unchecked_t mesq_noop_put_nacked;
43105+ atomic_long_unchecked_t mesq_noop_page_overflow;
43106
43107 };
43108
43109@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
43110 tghop_invalidate, mcsop_last};
43111
43112 struct mcs_op_statistic {
43113- atomic_long_t count;
43114- atomic_long_t total;
43115+ atomic_long_unchecked_t count;
43116+ atomic_long_unchecked_t total;
43117 unsigned long max;
43118 };
43119
43120@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
43121
43122 #define STAT(id) do { \
43123 if (gru_options & OPT_STATS) \
43124- atomic_long_inc(&gru_stats.id); \
43125+ atomic_long_inc_unchecked(&gru_stats.id); \
43126 } while (0)
43127
43128 #ifdef CONFIG_SGI_GRU_DEBUG
43129diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
43130index c862cd4..0d176fe 100644
43131--- a/drivers/misc/sgi-xp/xp.h
43132+++ b/drivers/misc/sgi-xp/xp.h
43133@@ -288,7 +288,7 @@ struct xpc_interface {
43134 xpc_notify_func, void *);
43135 void (*received) (short, int, void *);
43136 enum xp_retval (*partid_to_nasids) (short, void *);
43137-};
43138+} __no_const;
43139
43140 extern struct xpc_interface xpc_interface;
43141
43142diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
43143index b94d5f7..7f494c5 100644
43144--- a/drivers/misc/sgi-xp/xpc.h
43145+++ b/drivers/misc/sgi-xp/xpc.h
43146@@ -835,6 +835,7 @@ struct xpc_arch_operations {
43147 void (*received_payload) (struct xpc_channel *, void *);
43148 void (*notify_senders_of_disconnect) (struct xpc_channel *);
43149 };
43150+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
43151
43152 /* struct xpc_partition act_state values (for XPC HB) */
43153
43154@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
43155 /* found in xpc_main.c */
43156 extern struct device *xpc_part;
43157 extern struct device *xpc_chan;
43158-extern struct xpc_arch_operations xpc_arch_ops;
43159+extern xpc_arch_operations_no_const xpc_arch_ops;
43160 extern int xpc_disengage_timelimit;
43161 extern int xpc_disengage_timedout;
43162 extern int xpc_activate_IRQ_rcvd;
43163diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
43164index d971817..33bdca5 100644
43165--- a/drivers/misc/sgi-xp/xpc_main.c
43166+++ b/drivers/misc/sgi-xp/xpc_main.c
43167@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
43168 .notifier_call = xpc_system_die,
43169 };
43170
43171-struct xpc_arch_operations xpc_arch_ops;
43172+xpc_arch_operations_no_const xpc_arch_ops;
43173
43174 /*
43175 * Timer function to enforce the timelimit on the partition disengage.
43176@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
43177
43178 if (((die_args->trapnr == X86_TRAP_MF) ||
43179 (die_args->trapnr == X86_TRAP_XF)) &&
43180- !user_mode_vm(die_args->regs))
43181+ !user_mode(die_args->regs))
43182 xpc_die_deactivate();
43183
43184 break;
43185diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
43186index 49f04bc..65660c2 100644
43187--- a/drivers/mmc/core/mmc_ops.c
43188+++ b/drivers/mmc/core/mmc_ops.c
43189@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
43190 void *data_buf;
43191 int is_on_stack;
43192
43193- is_on_stack = object_is_on_stack(buf);
43194+ is_on_stack = object_starts_on_stack(buf);
43195 if (is_on_stack) {
43196 /*
43197 * dma onto stack is unsafe/nonportable, but callers to this
43198diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
43199index 0b74189..818358f 100644
43200--- a/drivers/mmc/host/dw_mmc.h
43201+++ b/drivers/mmc/host/dw_mmc.h
43202@@ -202,5 +202,5 @@ struct dw_mci_drv_data {
43203 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
43204 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
43205 int (*parse_dt)(struct dw_mci *host);
43206-};
43207+} __do_const;
43208 #endif /* _DW_MMC_H_ */
43209diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
43210index c6f6246..60760a8 100644
43211--- a/drivers/mmc/host/sdhci-s3c.c
43212+++ b/drivers/mmc/host/sdhci-s3c.c
43213@@ -664,9 +664,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
43214 * we can use overriding functions instead of default.
43215 */
43216 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
43217- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43218- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43219- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43220+ pax_open_kernel();
43221+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43222+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43223+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43224+ pax_close_kernel();
43225 }
43226
43227 /* It supports additional host capabilities if needed */
43228diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
43229index 0c8bb6b..6f35deb 100644
43230--- a/drivers/mtd/nand/denali.c
43231+++ b/drivers/mtd/nand/denali.c
43232@@ -24,6 +24,7 @@
43233 #include <linux/slab.h>
43234 #include <linux/mtd/mtd.h>
43235 #include <linux/module.h>
43236+#include <linux/slab.h>
43237
43238 #include "denali.h"
43239
43240diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
43241index 51b9d6a..52af9a7 100644
43242--- a/drivers/mtd/nftlmount.c
43243+++ b/drivers/mtd/nftlmount.c
43244@@ -24,6 +24,7 @@
43245 #include <asm/errno.h>
43246 #include <linux/delay.h>
43247 #include <linux/slab.h>
43248+#include <linux/sched.h>
43249 #include <linux/mtd/mtd.h>
43250 #include <linux/mtd/nand.h>
43251 #include <linux/mtd/nftl.h>
43252diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
43253index f9d5615..99dd95f 100644
43254--- a/drivers/mtd/sm_ftl.c
43255+++ b/drivers/mtd/sm_ftl.c
43256@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
43257 #define SM_CIS_VENDOR_OFFSET 0x59
43258 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
43259 {
43260- struct attribute_group *attr_group;
43261+ attribute_group_no_const *attr_group;
43262 struct attribute **attributes;
43263 struct sm_sysfs_attribute *vendor_attribute;
43264
43265diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
43266index f975696..4597e21 100644
43267--- a/drivers/net/bonding/bond_main.c
43268+++ b/drivers/net/bonding/bond_main.c
43269@@ -4870,7 +4870,7 @@ static unsigned int bond_get_num_tx_queues(void)
43270 return tx_queues;
43271 }
43272
43273-static struct rtnl_link_ops bond_link_ops __read_mostly = {
43274+static struct rtnl_link_ops bond_link_ops = {
43275 .kind = "bond",
43276 .priv_size = sizeof(struct bonding),
43277 .setup = bond_setup,
43278@@ -4995,8 +4995,8 @@ static void __exit bonding_exit(void)
43279
43280 bond_destroy_debugfs();
43281
43282- rtnl_link_unregister(&bond_link_ops);
43283 unregister_pernet_subsys(&bond_net_ops);
43284+ rtnl_link_unregister(&bond_link_ops);
43285
43286 #ifdef CONFIG_NET_POLL_CONTROLLER
43287 /*
43288diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
43289index e1d2643..7f4133b 100644
43290--- a/drivers/net/ethernet/8390/ax88796.c
43291+++ b/drivers/net/ethernet/8390/ax88796.c
43292@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
43293 if (ax->plat->reg_offsets)
43294 ei_local->reg_offset = ax->plat->reg_offsets;
43295 else {
43296+ resource_size_t _mem_size = mem_size;
43297+ do_div(_mem_size, 0x18);
43298 ei_local->reg_offset = ax->reg_offsets;
43299 for (ret = 0; ret < 0x18; ret++)
43300- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
43301+ ax->reg_offsets[ret] = _mem_size * ret;
43302 }
43303
43304 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
43305diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43306index 151675d..0139a9d 100644
43307--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43308+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43309@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
43310 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
43311 {
43312 /* RX_MODE controlling object */
43313- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
43314+ bnx2x_init_rx_mode_obj(bp);
43315
43316 /* multicast configuration controlling object */
43317 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
43318diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43319index ce1a916..10b52b0 100644
43320--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43321+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43322@@ -960,6 +960,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
43323 struct bnx2x *bp = netdev_priv(dev);
43324
43325 /* Use the ethtool_dump "flag" field as the dump preset index */
43326+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
43327+ return -EINVAL;
43328+
43329 bp->dump_preset_idx = val->flag;
43330 return 0;
43331 }
43332@@ -986,8 +989,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
43333 struct bnx2x *bp = netdev_priv(dev);
43334 struct dump_header dump_hdr = {0};
43335
43336- memset(p, 0, dump->len);
43337-
43338 /* Disable parity attentions as long as following dump may
43339 * cause false alarms by reading never written registers. We
43340 * will re-enable parity attentions right after the dump.
43341diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43342index b4c9dea..2a9927f 100644
43343--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43344+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43345@@ -11497,6 +11497,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
43346 bp->min_msix_vec_cnt = 2;
43347 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
43348
43349+ bp->dump_preset_idx = 1;
43350+
43351 return rc;
43352 }
43353
43354diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43355index 32a9609..0b1c53a 100644
43356--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43357+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43358@@ -2387,15 +2387,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
43359 return rc;
43360 }
43361
43362-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
43363- struct bnx2x_rx_mode_obj *o)
43364+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
43365 {
43366 if (CHIP_IS_E1x(bp)) {
43367- o->wait_comp = bnx2x_empty_rx_mode_wait;
43368- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
43369+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
43370+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
43371 } else {
43372- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
43373- o->config_rx_mode = bnx2x_set_rx_mode_e2;
43374+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
43375+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
43376 }
43377 }
43378
43379diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43380index 43c00bc..dd1d03d 100644
43381--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43382+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43383@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
43384
43385 /********************* RX MODE ****************/
43386
43387-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
43388- struct bnx2x_rx_mode_obj *o);
43389+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
43390
43391 /**
43392 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
43393diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
43394index ff6e30e..87e8452 100644
43395--- a/drivers/net/ethernet/broadcom/tg3.h
43396+++ b/drivers/net/ethernet/broadcom/tg3.h
43397@@ -147,6 +147,7 @@
43398 #define CHIPREV_ID_5750_A0 0x4000
43399 #define CHIPREV_ID_5750_A1 0x4001
43400 #define CHIPREV_ID_5750_A3 0x4003
43401+#define CHIPREV_ID_5750_C1 0x4201
43402 #define CHIPREV_ID_5750_C2 0x4202
43403 #define CHIPREV_ID_5752_A0_HW 0x5000
43404 #define CHIPREV_ID_5752_A0 0x6000
43405diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43406index 71497e8..b650951 100644
43407--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43408+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43409@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
43410 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
43411 t3_read_reg(adapter, A_PCIE_PEX_ERR));
43412
43413+ rtnl_lock();
43414 t3_resume_ports(adapter);
43415+ rtnl_unlock();
43416 }
43417
43418 static const struct pci_error_handlers t3_err_handler = {
43419diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43420index 8cffcdf..aadf043 100644
43421--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43422+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43423@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
43424 */
43425 struct l2t_skb_cb {
43426 arp_failure_handler_func arp_failure_handler;
43427-};
43428+} __no_const;
43429
43430 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
43431
43432diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
43433index 4c83003..2a2a5b9 100644
43434--- a/drivers/net/ethernet/dec/tulip/de4x5.c
43435+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
43436@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
43437 for (i=0; i<ETH_ALEN; i++) {
43438 tmp.addr[i] = dev->dev_addr[i];
43439 }
43440- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
43441+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
43442 break;
43443
43444 case DE4X5_SET_HWADDR: /* Set the hardware address */
43445@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
43446 spin_lock_irqsave(&lp->lock, flags);
43447 memcpy(&statbuf, &lp->pktStats, ioc->len);
43448 spin_unlock_irqrestore(&lp->lock, flags);
43449- if (copy_to_user(ioc->data, &statbuf, ioc->len))
43450+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
43451 return -EFAULT;
43452 break;
43453 }
43454diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
43455index 6e43426..1bd8365 100644
43456--- a/drivers/net/ethernet/emulex/benet/be_main.c
43457+++ b/drivers/net/ethernet/emulex/benet/be_main.c
43458@@ -469,7 +469,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
43459
43460 if (wrapped)
43461 newacc += 65536;
43462- ACCESS_ONCE(*acc) = newacc;
43463+ ACCESS_ONCE_RW(*acc) = newacc;
43464 }
43465
43466 void populate_erx_stats(struct be_adapter *adapter,
43467diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
43468index 21b85fb..b49e5fc 100644
43469--- a/drivers/net/ethernet/faraday/ftgmac100.c
43470+++ b/drivers/net/ethernet/faraday/ftgmac100.c
43471@@ -31,6 +31,8 @@
43472 #include <linux/netdevice.h>
43473 #include <linux/phy.h>
43474 #include <linux/platform_device.h>
43475+#include <linux/interrupt.h>
43476+#include <linux/irqreturn.h>
43477 #include <net/ip.h>
43478
43479 #include "ftgmac100.h"
43480diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
43481index a6eda8d..935d273 100644
43482--- a/drivers/net/ethernet/faraday/ftmac100.c
43483+++ b/drivers/net/ethernet/faraday/ftmac100.c
43484@@ -31,6 +31,8 @@
43485 #include <linux/module.h>
43486 #include <linux/netdevice.h>
43487 #include <linux/platform_device.h>
43488+#include <linux/interrupt.h>
43489+#include <linux/irqreturn.h>
43490
43491 #include "ftmac100.h"
43492
43493diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43494index 331987d..3be1135 100644
43495--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43496+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43497@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
43498 }
43499
43500 /* update the base incval used to calculate frequency adjustment */
43501- ACCESS_ONCE(adapter->base_incval) = incval;
43502+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
43503 smp_mb();
43504
43505 /* need lock to prevent incorrect read while modifying cyclecounter */
43506diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
43507index fbe5363..266b4e3 100644
43508--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
43509+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
43510@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
43511 struct __vxge_hw_fifo *fifo;
43512 struct vxge_hw_fifo_config *config;
43513 u32 txdl_size, txdl_per_memblock;
43514- struct vxge_hw_mempool_cbs fifo_mp_callback;
43515+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
43516+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
43517+ };
43518+
43519 struct __vxge_hw_virtualpath *vpath;
43520
43521 if ((vp == NULL) || (attr == NULL)) {
43522@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
43523 goto exit;
43524 }
43525
43526- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
43527-
43528 fifo->mempool =
43529 __vxge_hw_mempool_create(vpath->hldev,
43530 fifo->config->memblock_size,
43531diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43532index 5e7fb1d..f8d1810 100644
43533--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43534+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43535@@ -1948,7 +1948,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
43536 op_mode = QLC_83XX_DEFAULT_OPMODE;
43537
43538 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
43539- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
43540+ pax_open_kernel();
43541+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
43542+ pax_close_kernel();
43543 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43544 } else {
43545 return -EIO;
43546diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43547index b0c3de9..fc5857e 100644
43548--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43549+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43550@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
43551 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
43552 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
43553 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43554- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
43555+ pax_open_kernel();
43556+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
43557+ pax_close_kernel();
43558 } else if (priv_level == QLCNIC_PRIV_FUNC) {
43559 ahw->op_mode = QLCNIC_PRIV_FUNC;
43560 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
43561- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
43562+ pax_open_kernel();
43563+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
43564+ pax_close_kernel();
43565 } else if (priv_level == QLCNIC_MGMT_FUNC) {
43566 ahw->op_mode = QLCNIC_MGMT_FUNC;
43567 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43568- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
43569+ pax_open_kernel();
43570+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
43571+ pax_close_kernel();
43572 } else {
43573 return -EIO;
43574 }
43575diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43576index 6acf82b..14b097e 100644
43577--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43578+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43579@@ -206,10 +206,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
43580 if (err) {
43581 dev_info(&adapter->pdev->dev,
43582 "Failed to set driver version in firmware\n");
43583- return -EIO;
43584+ err = -EIO;
43585 }
43586-
43587- return 0;
43588+ qlcnic_free_mbx_args(&cmd);
43589+ return err;
43590 }
43591
43592 int
43593diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43594index d3f8797..82a03d3 100644
43595--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43596+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43597@@ -262,7 +262,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
43598
43599 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
43600 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
43601- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
43602+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
43603
43604 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
43605 vlan_req->vlan_id = cpu_to_le16(vlan_id);
43606diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
43607index 887aebe..9095ff9 100644
43608--- a/drivers/net/ethernet/realtek/8139cp.c
43609+++ b/drivers/net/ethernet/realtek/8139cp.c
43610@@ -524,6 +524,7 @@ rx_status_loop:
43611 PCI_DMA_FROMDEVICE);
43612 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
43613 dev->stats.rx_dropped++;
43614+ kfree_skb(new_skb);
43615 goto rx_next;
43616 }
43617
43618diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
43619index 393f961..d343034 100644
43620--- a/drivers/net/ethernet/realtek/r8169.c
43621+++ b/drivers/net/ethernet/realtek/r8169.c
43622@@ -753,22 +753,22 @@ struct rtl8169_private {
43623 struct mdio_ops {
43624 void (*write)(struct rtl8169_private *, int, int);
43625 int (*read)(struct rtl8169_private *, int);
43626- } mdio_ops;
43627+ } __no_const mdio_ops;
43628
43629 struct pll_power_ops {
43630 void (*down)(struct rtl8169_private *);
43631 void (*up)(struct rtl8169_private *);
43632- } pll_power_ops;
43633+ } __no_const pll_power_ops;
43634
43635 struct jumbo_ops {
43636 void (*enable)(struct rtl8169_private *);
43637 void (*disable)(struct rtl8169_private *);
43638- } jumbo_ops;
43639+ } __no_const jumbo_ops;
43640
43641 struct csi_ops {
43642 void (*write)(struct rtl8169_private *, int, int);
43643 u32 (*read)(struct rtl8169_private *, int);
43644- } csi_ops;
43645+ } __no_const csi_ops;
43646
43647 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
43648 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
43649diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
43650index 9a95abf..36df7f9 100644
43651--- a/drivers/net/ethernet/sfc/ptp.c
43652+++ b/drivers/net/ethernet/sfc/ptp.c
43653@@ -535,7 +535,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
43654 (u32)((u64)ptp->start.dma_addr >> 32));
43655
43656 /* Clear flag that signals MC ready */
43657- ACCESS_ONCE(*start) = 0;
43658+ ACCESS_ONCE_RW(*start) = 0;
43659 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
43660 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
43661
43662diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43663index 50617c5..b13724c 100644
43664--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43665+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43666@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
43667
43668 writel(value, ioaddr + MMC_CNTRL);
43669
43670- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
43671- MMC_CNTRL, value);
43672+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
43673+// MMC_CNTRL, value);
43674 }
43675
43676 /* To mask all all interrupts.*/
43677diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
43678index e6fe0d8..2b7d752 100644
43679--- a/drivers/net/hyperv/hyperv_net.h
43680+++ b/drivers/net/hyperv/hyperv_net.h
43681@@ -101,7 +101,7 @@ struct rndis_device {
43682
43683 enum rndis_device_state state;
43684 bool link_state;
43685- atomic_t new_req_id;
43686+ atomic_unchecked_t new_req_id;
43687
43688 spinlock_t request_lock;
43689 struct list_head req_list;
43690diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
43691index 0775f0a..d4fb316 100644
43692--- a/drivers/net/hyperv/rndis_filter.c
43693+++ b/drivers/net/hyperv/rndis_filter.c
43694@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
43695 * template
43696 */
43697 set = &rndis_msg->msg.set_req;
43698- set->req_id = atomic_inc_return(&dev->new_req_id);
43699+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
43700
43701 /* Add to the request list */
43702 spin_lock_irqsave(&dev->request_lock, flags);
43703@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
43704
43705 /* Setup the rndis set */
43706 halt = &request->request_msg.msg.halt_req;
43707- halt->req_id = atomic_inc_return(&dev->new_req_id);
43708+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
43709
43710 /* Ignore return since this msg is optional. */
43711 rndis_filter_send_request(dev, request);
43712diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
43713index bf0d55e..82bcfbd1 100644
43714--- a/drivers/net/ieee802154/fakehard.c
43715+++ b/drivers/net/ieee802154/fakehard.c
43716@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
43717 phy->transmit_power = 0xbf;
43718
43719 dev->netdev_ops = &fake_ops;
43720- dev->ml_priv = &fake_mlme;
43721+ dev->ml_priv = (void *)&fake_mlme;
43722
43723 priv = netdev_priv(dev);
43724 priv->phy = phy;
43725diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
43726index 6e91931..2b0ebe7 100644
43727--- a/drivers/net/macvlan.c
43728+++ b/drivers/net/macvlan.c
43729@@ -905,13 +905,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
43730 int macvlan_link_register(struct rtnl_link_ops *ops)
43731 {
43732 /* common fields */
43733- ops->priv_size = sizeof(struct macvlan_dev);
43734- ops->validate = macvlan_validate;
43735- ops->maxtype = IFLA_MACVLAN_MAX;
43736- ops->policy = macvlan_policy;
43737- ops->changelink = macvlan_changelink;
43738- ops->get_size = macvlan_get_size;
43739- ops->fill_info = macvlan_fill_info;
43740+ pax_open_kernel();
43741+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
43742+ *(void **)&ops->validate = macvlan_validate;
43743+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
43744+ *(const void **)&ops->policy = macvlan_policy;
43745+ *(void **)&ops->changelink = macvlan_changelink;
43746+ *(void **)&ops->get_size = macvlan_get_size;
43747+ *(void **)&ops->fill_info = macvlan_fill_info;
43748+ pax_close_kernel();
43749
43750 return rtnl_link_register(ops);
43751 };
43752@@ -967,7 +969,7 @@ static int macvlan_device_event(struct notifier_block *unused,
43753 return NOTIFY_DONE;
43754 }
43755
43756-static struct notifier_block macvlan_notifier_block __read_mostly = {
43757+static struct notifier_block macvlan_notifier_block = {
43758 .notifier_call = macvlan_device_event,
43759 };
43760
43761diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
43762index 523d6b2..5e16aa1 100644
43763--- a/drivers/net/macvtap.c
43764+++ b/drivers/net/macvtap.c
43765@@ -1110,7 +1110,7 @@ static int macvtap_device_event(struct notifier_block *unused,
43766 return NOTIFY_DONE;
43767 }
43768
43769-static struct notifier_block macvtap_notifier_block __read_mostly = {
43770+static struct notifier_block macvtap_notifier_block = {
43771 .notifier_call = macvtap_device_event,
43772 };
43773
43774diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
43775index daec9b0..6428fcb 100644
43776--- a/drivers/net/phy/mdio-bitbang.c
43777+++ b/drivers/net/phy/mdio-bitbang.c
43778@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
43779 struct mdiobb_ctrl *ctrl = bus->priv;
43780
43781 module_put(ctrl->ops->owner);
43782+ mdiobus_unregister(bus);
43783 mdiobus_free(bus);
43784 }
43785 EXPORT_SYMBOL(free_mdio_bitbang);
43786diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
43787index 72ff14b..11d442d 100644
43788--- a/drivers/net/ppp/ppp_generic.c
43789+++ b/drivers/net/ppp/ppp_generic.c
43790@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43791 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
43792 struct ppp_stats stats;
43793 struct ppp_comp_stats cstats;
43794- char *vers;
43795
43796 switch (cmd) {
43797 case SIOCGPPPSTATS:
43798@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43799 break;
43800
43801 case SIOCGPPPVER:
43802- vers = PPP_VERSION;
43803- if (copy_to_user(addr, vers, strlen(vers) + 1))
43804+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
43805 break;
43806 err = 0;
43807 break;
43808diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
43809index 1252d9c..80e660b 100644
43810--- a/drivers/net/slip/slhc.c
43811+++ b/drivers/net/slip/slhc.c
43812@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
43813 register struct tcphdr *thp;
43814 register struct iphdr *ip;
43815 register struct cstate *cs;
43816- int len, hdrlen;
43817+ long len, hdrlen;
43818 unsigned char *cp = icp;
43819
43820 /* We've got a compressed packet; read the change byte */
43821diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
43822index b305105..8ead6df 100644
43823--- a/drivers/net/team/team.c
43824+++ b/drivers/net/team/team.c
43825@@ -2682,7 +2682,7 @@ static int team_device_event(struct notifier_block *unused,
43826 return NOTIFY_DONE;
43827 }
43828
43829-static struct notifier_block team_notifier_block __read_mostly = {
43830+static struct notifier_block team_notifier_block = {
43831 .notifier_call = team_device_event,
43832 };
43833
43834diff --git a/drivers/net/tun.c b/drivers/net/tun.c
43835index 2491eb2..1a453eb 100644
43836--- a/drivers/net/tun.c
43837+++ b/drivers/net/tun.c
43838@@ -1076,8 +1076,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
43839 u32 rxhash;
43840
43841 if (!(tun->flags & TUN_NO_PI)) {
43842- if ((len -= sizeof(pi)) > total_len)
43843+ if (len < sizeof(pi))
43844 return -EINVAL;
43845+ len -= sizeof(pi);
43846
43847 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
43848 return -EFAULT;
43849@@ -1085,8 +1086,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
43850 }
43851
43852 if (tun->flags & TUN_VNET_HDR) {
43853- if ((len -= tun->vnet_hdr_sz) > total_len)
43854+ if (len < tun->vnet_hdr_sz)
43855 return -EINVAL;
43856+ len -= tun->vnet_hdr_sz;
43857
43858 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
43859 return -EFAULT;
43860@@ -1869,7 +1871,7 @@ unlock:
43861 }
43862
43863 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
43864- unsigned long arg, int ifreq_len)
43865+ unsigned long arg, size_t ifreq_len)
43866 {
43867 struct tun_file *tfile = file->private_data;
43868 struct tun_struct *tun;
43869@@ -1881,6 +1883,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
43870 int vnet_hdr_sz;
43871 int ret;
43872
43873+ if (ifreq_len > sizeof ifr)
43874+ return -EFAULT;
43875+
43876 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
43877 if (copy_from_user(&ifr, argp, ifreq_len))
43878 return -EFAULT;
43879diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
43880index cba1d46..f703766 100644
43881--- a/drivers/net/usb/hso.c
43882+++ b/drivers/net/usb/hso.c
43883@@ -71,7 +71,7 @@
43884 #include <asm/byteorder.h>
43885 #include <linux/serial_core.h>
43886 #include <linux/serial.h>
43887-
43888+#include <asm/local.h>
43889
43890 #define MOD_AUTHOR "Option Wireless"
43891 #define MOD_DESCRIPTION "USB High Speed Option driver"
43892@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
43893 struct urb *urb;
43894
43895 urb = serial->rx_urb[0];
43896- if (serial->port.count > 0) {
43897+ if (atomic_read(&serial->port.count) > 0) {
43898 count = put_rxbuf_data(urb, serial);
43899 if (count == -1)
43900 return;
43901@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
43902 DUMP1(urb->transfer_buffer, urb->actual_length);
43903
43904 /* Anyone listening? */
43905- if (serial->port.count == 0)
43906+ if (atomic_read(&serial->port.count) == 0)
43907 return;
43908
43909 if (status == 0) {
43910@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
43911 tty_port_tty_set(&serial->port, tty);
43912
43913 /* check for port already opened, if not set the termios */
43914- serial->port.count++;
43915- if (serial->port.count == 1) {
43916+ if (atomic_inc_return(&serial->port.count) == 1) {
43917 serial->rx_state = RX_IDLE;
43918 /* Force default termio settings */
43919 _hso_serial_set_termios(tty, NULL);
43920@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
43921 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
43922 if (result) {
43923 hso_stop_serial_device(serial->parent);
43924- serial->port.count--;
43925+ atomic_dec(&serial->port.count);
43926 kref_put(&serial->parent->ref, hso_serial_ref_free);
43927 }
43928 } else {
43929@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
43930
43931 /* reset the rts and dtr */
43932 /* do the actual close */
43933- serial->port.count--;
43934+ atomic_dec(&serial->port.count);
43935
43936- if (serial->port.count <= 0) {
43937- serial->port.count = 0;
43938+ if (atomic_read(&serial->port.count) <= 0) {
43939+ atomic_set(&serial->port.count, 0);
43940 tty_port_tty_set(&serial->port, NULL);
43941 if (!usb_gone)
43942 hso_stop_serial_device(serial->parent);
43943@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
43944
43945 /* the actual setup */
43946 spin_lock_irqsave(&serial->serial_lock, flags);
43947- if (serial->port.count)
43948+ if (atomic_read(&serial->port.count))
43949 _hso_serial_set_termios(tty, old);
43950 else
43951 tty->termios = *old;
43952@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
43953 D1("Pending read interrupt on port %d\n", i);
43954 spin_lock(&serial->serial_lock);
43955 if (serial->rx_state == RX_IDLE &&
43956- serial->port.count > 0) {
43957+ atomic_read(&serial->port.count) > 0) {
43958 /* Setup and send a ctrl req read on
43959 * port i */
43960 if (!serial->rx_urb_filled[0]) {
43961@@ -3057,7 +3056,7 @@ static int hso_resume(struct usb_interface *iface)
43962 /* Start all serial ports */
43963 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
43964 if (serial_table[i] && (serial_table[i]->interface == iface)) {
43965- if (dev2ser(serial_table[i])->port.count) {
43966+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
43967 result =
43968 hso_start_serial_device(serial_table[i], GFP_NOIO);
43969 hso_kick_transmit(dev2ser(serial_table[i]));
43970diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
43971index 57325f3..36b181f 100644
43972--- a/drivers/net/vxlan.c
43973+++ b/drivers/net/vxlan.c
43974@@ -1579,7 +1579,7 @@ nla_put_failure:
43975 return -EMSGSIZE;
43976 }
43977
43978-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
43979+static struct rtnl_link_ops vxlan_link_ops = {
43980 .kind = "vxlan",
43981 .maxtype = IFLA_VXLAN_MAX,
43982 .policy = vxlan_policy,
43983diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
43984index 34c8a33..3261fdc 100644
43985--- a/drivers/net/wireless/at76c50x-usb.c
43986+++ b/drivers/net/wireless/at76c50x-usb.c
43987@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
43988 }
43989
43990 /* Convert timeout from the DFU status to jiffies */
43991-static inline unsigned long at76_get_timeout(struct dfu_status *s)
43992+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
43993 {
43994 return msecs_to_jiffies((s->poll_timeout[2] << 16)
43995 | (s->poll_timeout[1] << 8)
43996diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
43997index 8d78253..bebbb68 100644
43998--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
43999+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44000@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44001 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
44002 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
44003
44004- ACCESS_ONCE(ads->ds_link) = i->link;
44005- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
44006+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
44007+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
44008
44009 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
44010 ctl6 = SM(i->keytype, AR_EncrType);
44011@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44012
44013 if ((i->is_first || i->is_last) &&
44014 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
44015- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
44016+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
44017 | set11nTries(i->rates, 1)
44018 | set11nTries(i->rates, 2)
44019 | set11nTries(i->rates, 3)
44020 | (i->dur_update ? AR_DurUpdateEna : 0)
44021 | SM(0, AR_BurstDur);
44022
44023- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
44024+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
44025 | set11nRate(i->rates, 1)
44026 | set11nRate(i->rates, 2)
44027 | set11nRate(i->rates, 3);
44028 } else {
44029- ACCESS_ONCE(ads->ds_ctl2) = 0;
44030- ACCESS_ONCE(ads->ds_ctl3) = 0;
44031+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
44032+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
44033 }
44034
44035 if (!i->is_first) {
44036- ACCESS_ONCE(ads->ds_ctl0) = 0;
44037- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44038- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44039+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
44040+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44041+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44042 return;
44043 }
44044
44045@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44046 break;
44047 }
44048
44049- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44050+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44051 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44052 | SM(i->txpower, AR_XmitPower)
44053 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44054@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44055 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
44056 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
44057
44058- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44059- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44060+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44061+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44062
44063 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
44064 return;
44065
44066- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44067+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44068 | set11nPktDurRTSCTS(i->rates, 1);
44069
44070- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44071+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44072 | set11nPktDurRTSCTS(i->rates, 3);
44073
44074- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44075+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44076 | set11nRateFlags(i->rates, 1)
44077 | set11nRateFlags(i->rates, 2)
44078 | set11nRateFlags(i->rates, 3)
44079diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44080index 301bf72..3f5654f 100644
44081--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44082+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44083@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44084 (i->qcu << AR_TxQcuNum_S) | desc_len;
44085
44086 checksum += val;
44087- ACCESS_ONCE(ads->info) = val;
44088+ ACCESS_ONCE_RW(ads->info) = val;
44089
44090 checksum += i->link;
44091- ACCESS_ONCE(ads->link) = i->link;
44092+ ACCESS_ONCE_RW(ads->link) = i->link;
44093
44094 checksum += i->buf_addr[0];
44095- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
44096+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
44097 checksum += i->buf_addr[1];
44098- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
44099+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
44100 checksum += i->buf_addr[2];
44101- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
44102+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
44103 checksum += i->buf_addr[3];
44104- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
44105+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
44106
44107 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
44108- ACCESS_ONCE(ads->ctl3) = val;
44109+ ACCESS_ONCE_RW(ads->ctl3) = val;
44110 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
44111- ACCESS_ONCE(ads->ctl5) = val;
44112+ ACCESS_ONCE_RW(ads->ctl5) = val;
44113 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
44114- ACCESS_ONCE(ads->ctl7) = val;
44115+ ACCESS_ONCE_RW(ads->ctl7) = val;
44116 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
44117- ACCESS_ONCE(ads->ctl9) = val;
44118+ ACCESS_ONCE_RW(ads->ctl9) = val;
44119
44120 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
44121- ACCESS_ONCE(ads->ctl10) = checksum;
44122+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
44123
44124 if (i->is_first || i->is_last) {
44125- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
44126+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
44127 | set11nTries(i->rates, 1)
44128 | set11nTries(i->rates, 2)
44129 | set11nTries(i->rates, 3)
44130 | (i->dur_update ? AR_DurUpdateEna : 0)
44131 | SM(0, AR_BurstDur);
44132
44133- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
44134+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
44135 | set11nRate(i->rates, 1)
44136 | set11nRate(i->rates, 2)
44137 | set11nRate(i->rates, 3);
44138 } else {
44139- ACCESS_ONCE(ads->ctl13) = 0;
44140- ACCESS_ONCE(ads->ctl14) = 0;
44141+ ACCESS_ONCE_RW(ads->ctl13) = 0;
44142+ ACCESS_ONCE_RW(ads->ctl14) = 0;
44143 }
44144
44145 ads->ctl20 = 0;
44146@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44147
44148 ctl17 = SM(i->keytype, AR_EncrType);
44149 if (!i->is_first) {
44150- ACCESS_ONCE(ads->ctl11) = 0;
44151- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44152- ACCESS_ONCE(ads->ctl15) = 0;
44153- ACCESS_ONCE(ads->ctl16) = 0;
44154- ACCESS_ONCE(ads->ctl17) = ctl17;
44155- ACCESS_ONCE(ads->ctl18) = 0;
44156- ACCESS_ONCE(ads->ctl19) = 0;
44157+ ACCESS_ONCE_RW(ads->ctl11) = 0;
44158+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44159+ ACCESS_ONCE_RW(ads->ctl15) = 0;
44160+ ACCESS_ONCE_RW(ads->ctl16) = 0;
44161+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44162+ ACCESS_ONCE_RW(ads->ctl18) = 0;
44163+ ACCESS_ONCE_RW(ads->ctl19) = 0;
44164 return;
44165 }
44166
44167- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44168+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44169 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44170 | SM(i->txpower, AR_XmitPower)
44171 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44172@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44173 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
44174 ctl12 |= SM(val, AR_PAPRDChainMask);
44175
44176- ACCESS_ONCE(ads->ctl12) = ctl12;
44177- ACCESS_ONCE(ads->ctl17) = ctl17;
44178+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
44179+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44180
44181- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44182+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44183 | set11nPktDurRTSCTS(i->rates, 1);
44184
44185- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44186+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44187 | set11nPktDurRTSCTS(i->rates, 3);
44188
44189- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
44190+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
44191 | set11nRateFlags(i->rates, 1)
44192 | set11nRateFlags(i->rates, 2)
44193 | set11nRateFlags(i->rates, 3)
44194 | SM(i->rtscts_rate, AR_RTSCTSRate);
44195
44196- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
44197+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
44198 }
44199
44200 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
44201diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
44202index ae30343..a117806 100644
44203--- a/drivers/net/wireless/ath/ath9k/hw.h
44204+++ b/drivers/net/wireless/ath/ath9k/hw.h
44205@@ -652,7 +652,7 @@ struct ath_hw_private_ops {
44206
44207 /* ANI */
44208 void (*ani_cache_ini_regs)(struct ath_hw *ah);
44209-};
44210+} __no_const;
44211
44212 /**
44213 * struct ath_spec_scan - parameters for Atheros spectral scan
44214@@ -721,7 +721,7 @@ struct ath_hw_ops {
44215 struct ath_spec_scan *param);
44216 void (*spectral_scan_trigger)(struct ath_hw *ah);
44217 void (*spectral_scan_wait)(struct ath_hw *ah);
44218-};
44219+} __no_const;
44220
44221 struct ath_nf_limits {
44222 s16 max;
44223diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
44224index b37a582..680835d 100644
44225--- a/drivers/net/wireless/iwlegacy/3945-mac.c
44226+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
44227@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44228 */
44229 if (il3945_mod_params.disable_hw_scan) {
44230 D_INFO("Disabling hw_scan\n");
44231- il3945_mac_ops.hw_scan = NULL;
44232+ pax_open_kernel();
44233+ *(void **)&il3945_mac_ops.hw_scan = NULL;
44234+ pax_close_kernel();
44235 }
44236
44237 D_INFO("*** LOAD DRIVER ***\n");
44238diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44239index d532948..e0d8bb1 100644
44240--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44241+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44242@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
44243 {
44244 struct iwl_priv *priv = file->private_data;
44245 char buf[64];
44246- int buf_size;
44247+ size_t buf_size;
44248 u32 offset, len;
44249
44250 memset(buf, 0, sizeof(buf));
44251@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
44252 struct iwl_priv *priv = file->private_data;
44253
44254 char buf[8];
44255- int buf_size;
44256+ size_t buf_size;
44257 u32 reset_flag;
44258
44259 memset(buf, 0, sizeof(buf));
44260@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
44261 {
44262 struct iwl_priv *priv = file->private_data;
44263 char buf[8];
44264- int buf_size;
44265+ size_t buf_size;
44266 int ht40;
44267
44268 memset(buf, 0, sizeof(buf));
44269@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
44270 {
44271 struct iwl_priv *priv = file->private_data;
44272 char buf[8];
44273- int buf_size;
44274+ size_t buf_size;
44275 int value;
44276
44277 memset(buf, 0, sizeof(buf));
44278@@ -698,10 +698,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
44279 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
44280 DEBUGFS_READ_FILE_OPS(current_sleep_command);
44281
44282-static const char *fmt_value = " %-30s %10u\n";
44283-static const char *fmt_hex = " %-30s 0x%02X\n";
44284-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
44285-static const char *fmt_header =
44286+static const char fmt_value[] = " %-30s %10u\n";
44287+static const char fmt_hex[] = " %-30s 0x%02X\n";
44288+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
44289+static const char fmt_header[] =
44290 "%-32s current cumulative delta max\n";
44291
44292 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
44293@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
44294 {
44295 struct iwl_priv *priv = file->private_data;
44296 char buf[8];
44297- int buf_size;
44298+ size_t buf_size;
44299 int clear;
44300
44301 memset(buf, 0, sizeof(buf));
44302@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
44303 {
44304 struct iwl_priv *priv = file->private_data;
44305 char buf[8];
44306- int buf_size;
44307+ size_t buf_size;
44308 int trace;
44309
44310 memset(buf, 0, sizeof(buf));
44311@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
44312 {
44313 struct iwl_priv *priv = file->private_data;
44314 char buf[8];
44315- int buf_size;
44316+ size_t buf_size;
44317 int missed;
44318
44319 memset(buf, 0, sizeof(buf));
44320@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
44321
44322 struct iwl_priv *priv = file->private_data;
44323 char buf[8];
44324- int buf_size;
44325+ size_t buf_size;
44326 int plcp;
44327
44328 memset(buf, 0, sizeof(buf));
44329@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
44330
44331 struct iwl_priv *priv = file->private_data;
44332 char buf[8];
44333- int buf_size;
44334+ size_t buf_size;
44335 int flush;
44336
44337 memset(buf, 0, sizeof(buf));
44338@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
44339
44340 struct iwl_priv *priv = file->private_data;
44341 char buf[8];
44342- int buf_size;
44343+ size_t buf_size;
44344 int rts;
44345
44346 if (!priv->cfg->ht_params)
44347@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
44348 {
44349 struct iwl_priv *priv = file->private_data;
44350 char buf[8];
44351- int buf_size;
44352+ size_t buf_size;
44353
44354 memset(buf, 0, sizeof(buf));
44355 buf_size = min(count, sizeof(buf) - 1);
44356@@ -2254,7 +2254,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
44357 struct iwl_priv *priv = file->private_data;
44358 u32 event_log_flag;
44359 char buf[8];
44360- int buf_size;
44361+ size_t buf_size;
44362
44363 /* check that the interface is up */
44364 if (!iwl_is_ready(priv))
44365@@ -2308,7 +2308,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
44366 struct iwl_priv *priv = file->private_data;
44367 char buf[8];
44368 u32 calib_disabled;
44369- int buf_size;
44370+ size_t buf_size;
44371
44372 memset(buf, 0, sizeof(buf));
44373 buf_size = min(count, sizeof(buf) - 1);
44374diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
e2b79cd1 44375index aeb70e1..d7b5bb5 100644
bb5f0bf8
AF
44376--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
44377+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
44378@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
44379 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
44380
44381 char buf[8];
44382- int buf_size;
44383+ size_t buf_size;
44384 u32 reset_flag;
44385
44386 memset(buf, 0, sizeof(buf));
44387@@ -1350,7 +1350,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
44388 {
44389 struct iwl_trans *trans = file->private_data;
44390 char buf[8];
44391- int buf_size;
44392+ size_t buf_size;
44393 int csr;
44394
44395 memset(buf, 0, sizeof(buf));
44396diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
44397index cb34c78..9fec0dc 100644
44398--- a/drivers/net/wireless/mac80211_hwsim.c
44399+++ b/drivers/net/wireless/mac80211_hwsim.c
44400@@ -2195,25 +2195,19 @@ static int __init init_mac80211_hwsim(void)
44401
44402 if (channels > 1) {
44403 hwsim_if_comb.num_different_channels = channels;
44404- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
44405- mac80211_hwsim_ops.cancel_hw_scan =
44406- mac80211_hwsim_cancel_hw_scan;
44407- mac80211_hwsim_ops.sw_scan_start = NULL;
44408- mac80211_hwsim_ops.sw_scan_complete = NULL;
44409- mac80211_hwsim_ops.remain_on_channel =
44410- mac80211_hwsim_roc;
44411- mac80211_hwsim_ops.cancel_remain_on_channel =
44412- mac80211_hwsim_croc;
44413- mac80211_hwsim_ops.add_chanctx =
44414- mac80211_hwsim_add_chanctx;
44415- mac80211_hwsim_ops.remove_chanctx =
44416- mac80211_hwsim_remove_chanctx;
44417- mac80211_hwsim_ops.change_chanctx =
44418- mac80211_hwsim_change_chanctx;
44419- mac80211_hwsim_ops.assign_vif_chanctx =
44420- mac80211_hwsim_assign_vif_chanctx;
44421- mac80211_hwsim_ops.unassign_vif_chanctx =
44422- mac80211_hwsim_unassign_vif_chanctx;
44423+ pax_open_kernel();
44424+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
44425+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
44426+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
44427+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
44428+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
44429+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
44430+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
44431+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
44432+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
44433+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
44434+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
44435+ pax_close_kernel();
44436 }
44437
44438 spin_lock_init(&hwsim_radio_lock);
44439diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
44440index 8169a85..7fa3b47 100644
44441--- a/drivers/net/wireless/rndis_wlan.c
44442+++ b/drivers/net/wireless/rndis_wlan.c
44443@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
44444
44445 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
44446
44447- if (rts_threshold < 0 || rts_threshold > 2347)
44448+ if (rts_threshold > 2347)
44449 rts_threshold = 2347;
44450
44451 tmp = cpu_to_le32(rts_threshold);
44452diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
44453index 7510723..5ba37f5 100644
44454--- a/drivers/net/wireless/rt2x00/rt2x00.h
44455+++ b/drivers/net/wireless/rt2x00/rt2x00.h
44456@@ -386,7 +386,7 @@ struct rt2x00_intf {
44457 * for hardware which doesn't support hardware
44458 * sequence counting.
44459 */
44460- atomic_t seqno;
44461+ atomic_unchecked_t seqno;
44462 };
44463
44464 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
44465diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
44466index d955741..8730748 100644
44467--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
44468+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
44469@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
44470 * sequence counter given by mac80211.
44471 */
44472 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
44473- seqno = atomic_add_return(0x10, &intf->seqno);
44474+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
44475 else
44476- seqno = atomic_read(&intf->seqno);
44477+ seqno = atomic_read_unchecked(&intf->seqno);
44478
44479 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
44480 hdr->seq_ctrl |= cpu_to_le16(seqno);
44481diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
44482index e2b3d9c..67a5184 100644
44483--- a/drivers/net/wireless/ti/wl1251/sdio.c
44484+++ b/drivers/net/wireless/ti/wl1251/sdio.c
44485@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
44486
44487 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
44488
44489- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
44490- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
44491+ pax_open_kernel();
44492+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
44493+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
44494+ pax_close_kernel();
44495
44496 wl1251_info("using dedicated interrupt line");
44497 } else {
44498- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
44499- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
44500+ pax_open_kernel();
44501+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
44502+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
44503+ pax_close_kernel();
44504
44505 wl1251_info("using SDIO interrupt");
44506 }
44507diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
44508index 1c627da..69f7d17 100644
44509--- a/drivers/net/wireless/ti/wl12xx/main.c
44510+++ b/drivers/net/wireless/ti/wl12xx/main.c
44511@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
44512 sizeof(wl->conf.mem));
44513
44514 /* read data preparation is only needed by wl127x */
44515- wl->ops->prepare_read = wl127x_prepare_read;
44516+ pax_open_kernel();
44517+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
44518+ pax_close_kernel();
44519
44520 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
44521 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
44522@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
44523 sizeof(wl->conf.mem));
44524
44525 /* read data preparation is only needed by wl127x */
44526- wl->ops->prepare_read = wl127x_prepare_read;
44527+ pax_open_kernel();
44528+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
44529+ pax_close_kernel();
44530
44531 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
44532 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
44533diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
44534index 9fa692d..b31fee0 100644
44535--- a/drivers/net/wireless/ti/wl18xx/main.c
44536+++ b/drivers/net/wireless/ti/wl18xx/main.c
44537@@ -1687,8 +1687,10 @@ static int wl18xx_setup(struct wl1271 *wl)
44538 }
44539
44540 if (!checksum_param) {
44541- wl18xx_ops.set_rx_csum = NULL;
44542- wl18xx_ops.init_vif = NULL;
44543+ pax_open_kernel();
44544+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
44545+ *(void **)&wl18xx_ops.init_vif = NULL;
44546+ pax_close_kernel();
44547 }
44548
44549 /* Enable 11a Band only if we have 5G antennas */
44550diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
44551index 7ef0b4a..ff65c28 100644
44552--- a/drivers/net/wireless/zd1211rw/zd_usb.c
44553+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
44554@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
44555 {
44556 struct zd_usb *usb = urb->context;
44557 struct zd_usb_interrupt *intr = &usb->intr;
44558- int len;
44559+ unsigned int len;
44560 u16 int_num;
44561
44562 ZD_ASSERT(in_interrupt());
44563diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
44564index d93b2b6..ae50401 100644
44565--- a/drivers/oprofile/buffer_sync.c
44566+++ b/drivers/oprofile/buffer_sync.c
44567@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
44568 if (cookie == NO_COOKIE)
44569 offset = pc;
44570 if (cookie == INVALID_COOKIE) {
44571- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
44572+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
44573 offset = pc;
44574 }
44575 if (cookie != last_cookie) {
44576@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
44577 /* add userspace sample */
44578
44579 if (!mm) {
44580- atomic_inc(&oprofile_stats.sample_lost_no_mm);
44581+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
44582 return 0;
44583 }
44584
44585 cookie = lookup_dcookie(mm, s->eip, &offset);
44586
44587 if (cookie == INVALID_COOKIE) {
44588- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
44589+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
44590 return 0;
44591 }
44592
44593@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
44594 /* ignore backtraces if failed to add a sample */
44595 if (state == sb_bt_start) {
44596 state = sb_bt_ignore;
44597- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
44598+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
44599 }
44600 }
44601 release_mm(mm);
44602diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
44603index c0cc4e7..44d4e54 100644
44604--- a/drivers/oprofile/event_buffer.c
44605+++ b/drivers/oprofile/event_buffer.c
44606@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
44607 }
44608
44609 if (buffer_pos == buffer_size) {
44610- atomic_inc(&oprofile_stats.event_lost_overflow);
44611+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
44612 return;
44613 }
44614
44615diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
44616index ed2c3ec..deda85a 100644
44617--- a/drivers/oprofile/oprof.c
44618+++ b/drivers/oprofile/oprof.c
44619@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
44620 if (oprofile_ops.switch_events())
44621 return;
44622
44623- atomic_inc(&oprofile_stats.multiplex_counter);
44624+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
44625 start_switch_worker();
44626 }
44627
44628diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
44629index 84a208d..d61b0a1 100644
44630--- a/drivers/oprofile/oprofile_files.c
44631+++ b/drivers/oprofile/oprofile_files.c
44632@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
44633
44634 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
44635
44636-static ssize_t timeout_read(struct file *file, char __user *buf,
44637+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
44638 size_t count, loff_t *offset)
44639 {
44640 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
44641diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
44642index 917d28e..d62d981 100644
44643--- a/drivers/oprofile/oprofile_stats.c
44644+++ b/drivers/oprofile/oprofile_stats.c
44645@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
44646 cpu_buf->sample_invalid_eip = 0;
44647 }
44648
44649- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
44650- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
44651- atomic_set(&oprofile_stats.event_lost_overflow, 0);
44652- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
44653- atomic_set(&oprofile_stats.multiplex_counter, 0);
44654+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
44655+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
44656+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
44657+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
44658+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
44659 }
44660
44661
44662diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
44663index 38b6fc0..b5cbfce 100644
44664--- a/drivers/oprofile/oprofile_stats.h
44665+++ b/drivers/oprofile/oprofile_stats.h
44666@@ -13,11 +13,11 @@
44667 #include <linux/atomic.h>
44668
44669 struct oprofile_stat_struct {
44670- atomic_t sample_lost_no_mm;
44671- atomic_t sample_lost_no_mapping;
44672- atomic_t bt_lost_no_mapping;
44673- atomic_t event_lost_overflow;
44674- atomic_t multiplex_counter;
44675+ atomic_unchecked_t sample_lost_no_mm;
44676+ atomic_unchecked_t sample_lost_no_mapping;
44677+ atomic_unchecked_t bt_lost_no_mapping;
44678+ atomic_unchecked_t event_lost_overflow;
44679+ atomic_unchecked_t multiplex_counter;
44680 };
44681
44682 extern struct oprofile_stat_struct oprofile_stats;
44683diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
44684index 7c12d9c..558bf3bb 100644
44685--- a/drivers/oprofile/oprofilefs.c
44686+++ b/drivers/oprofile/oprofilefs.c
44687@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
44688
44689
44690 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
44691- char const *name, atomic_t *val)
44692+ char const *name, atomic_unchecked_t *val)
44693 {
44694 return __oprofilefs_create_file(sb, root, name,
44695 &atomic_ro_fops, 0444, val);
44696diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
44697index 93404f7..4a313d8 100644
44698--- a/drivers/oprofile/timer_int.c
44699+++ b/drivers/oprofile/timer_int.c
44700@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
44701 return NOTIFY_OK;
44702 }
44703
44704-static struct notifier_block __refdata oprofile_cpu_notifier = {
44705+static struct notifier_block oprofile_cpu_notifier = {
44706 .notifier_call = oprofile_cpu_notify,
44707 };
44708
44709diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
44710index 92ed045..62d39bd7 100644
44711--- a/drivers/parport/procfs.c
44712+++ b/drivers/parport/procfs.c
44713@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
44714
44715 *ppos += len;
44716
44717- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
44718+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
44719 }
44720
44721 #ifdef CONFIG_PARPORT_1284
44722@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
44723
44724 *ppos += len;
44725
44726- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
44727+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
44728 }
44729 #endif /* IEEE1284.3 support. */
44730
44731diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
44732index c35e8ad..fc33beb 100644
44733--- a/drivers/pci/hotplug/acpiphp_ibm.c
44734+++ b/drivers/pci/hotplug/acpiphp_ibm.c
44735@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
44736 goto init_cleanup;
44737 }
44738
44739- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
44740+ pax_open_kernel();
44741+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
44742+ pax_close_kernel();
44743 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
44744
44745 return retval;
44746diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
44747index a6a71c4..c91097b 100644
44748--- a/drivers/pci/hotplug/cpcihp_generic.c
44749+++ b/drivers/pci/hotplug/cpcihp_generic.c
44750@@ -73,7 +73,6 @@ static u16 port;
44751 static unsigned int enum_bit;
44752 static u8 enum_mask;
44753
44754-static struct cpci_hp_controller_ops generic_hpc_ops;
44755 static struct cpci_hp_controller generic_hpc;
44756
44757 static int __init validate_parameters(void)
44758@@ -139,6 +138,10 @@ static int query_enum(void)
44759 return ((value & enum_mask) == enum_mask);
44760 }
44761
44762+static struct cpci_hp_controller_ops generic_hpc_ops = {
44763+ .query_enum = query_enum,
44764+};
44765+
44766 static int __init cpcihp_generic_init(void)
44767 {
44768 int status;
44769@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
44770 pci_dev_put(dev);
44771
44772 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
44773- generic_hpc_ops.query_enum = query_enum;
44774 generic_hpc.ops = &generic_hpc_ops;
44775
44776 status = cpci_hp_register_controller(&generic_hpc);
44777diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
44778index 449b4bb..257e2e8 100644
44779--- a/drivers/pci/hotplug/cpcihp_zt5550.c
44780+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
44781@@ -59,7 +59,6 @@
44782 /* local variables */
44783 static bool debug;
44784 static bool poll;
44785-static struct cpci_hp_controller_ops zt5550_hpc_ops;
44786 static struct cpci_hp_controller zt5550_hpc;
44787
44788 /* Primary cPCI bus bridge device */
44789@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
44790 return 0;
44791 }
44792
44793+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
44794+ .query_enum = zt5550_hc_query_enum,
44795+};
44796+
44797 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
44798 {
44799 int status;
44800@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
44801 dbg("returned from zt5550_hc_config");
44802
44803 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
44804- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
44805 zt5550_hpc.ops = &zt5550_hpc_ops;
44806 if(!poll) {
44807 zt5550_hpc.irq = hc_dev->irq;
44808 zt5550_hpc.irq_flags = IRQF_SHARED;
44809 zt5550_hpc.dev_id = hc_dev;
44810
44811- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
44812- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
44813- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
44814+ pax_open_kernel();
44815+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
44816+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
44817+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
44818+ pax_open_kernel();
44819 } else {
44820 info("using ENUM# polling mode");
44821 }
44822diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
44823index 76ba8a1..20ca857 100644
44824--- a/drivers/pci/hotplug/cpqphp_nvram.c
44825+++ b/drivers/pci/hotplug/cpqphp_nvram.c
44826@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
44827
44828 void compaq_nvram_init (void __iomem *rom_start)
44829 {
44830+
44831+#ifndef CONFIG_PAX_KERNEXEC
44832 if (rom_start) {
44833 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
44834 }
44835+#endif
44836+
44837 dbg("int15 entry = %p\n", compaq_int15_entry_point);
44838
44839 /* initialize our int15 lock */
44840diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
44841index ec20f74..c1d961e 100644
44842--- a/drivers/pci/hotplug/pci_hotplug_core.c
44843+++ b/drivers/pci/hotplug/pci_hotplug_core.c
44844@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
44845 return -EINVAL;
44846 }
44847
44848- slot->ops->owner = owner;
44849- slot->ops->mod_name = mod_name;
44850+ pax_open_kernel();
44851+ *(struct module **)&slot->ops->owner = owner;
44852+ *(const char **)&slot->ops->mod_name = mod_name;
44853+ pax_close_kernel();
44854
44855 mutex_lock(&pci_hp_mutex);
44856 /*
44857diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
44858index 7d72c5e..edce02c 100644
44859--- a/drivers/pci/hotplug/pciehp_core.c
44860+++ b/drivers/pci/hotplug/pciehp_core.c
44861@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
44862 struct slot *slot = ctrl->slot;
44863 struct hotplug_slot *hotplug = NULL;
44864 struct hotplug_slot_info *info = NULL;
44865- struct hotplug_slot_ops *ops = NULL;
44866+ hotplug_slot_ops_no_const *ops = NULL;
44867 char name[SLOT_NAME_SIZE];
44868 int retval = -ENOMEM;
44869
44870diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
44871index 5b4a9d9..cd5ac1f 100644
44872--- a/drivers/pci/pci-sysfs.c
44873+++ b/drivers/pci/pci-sysfs.c
44874@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
44875 {
44876 /* allocate attribute structure, piggyback attribute name */
44877 int name_len = write_combine ? 13 : 10;
44878- struct bin_attribute *res_attr;
44879+ bin_attribute_no_const *res_attr;
44880 int retval;
44881
44882 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
44883@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
44884 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
44885 {
44886 int retval;
44887- struct bin_attribute *attr;
44888+ bin_attribute_no_const *attr;
44889
44890 /* If the device has VPD, try to expose it in sysfs. */
44891 if (dev->vpd) {
44892@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
44893 {
44894 int retval;
44895 int rom_size = 0;
44896- struct bin_attribute *attr;
44897+ bin_attribute_no_const *attr;
44898
44899 if (!sysfs_initialized)
44900 return -EACCES;
44901diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
44902index d1182c4..2a138ec 100644
44903--- a/drivers/pci/pci.h
44904+++ b/drivers/pci/pci.h
44905@@ -92,7 +92,7 @@ struct pci_vpd_ops {
44906 struct pci_vpd {
44907 unsigned int len;
44908 const struct pci_vpd_ops *ops;
44909- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
44910+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
44911 };
44912
44913 int pci_vpd_pci22_init(struct pci_dev *dev);
44914diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
44915index d320df6..ca9a8f6 100644
44916--- a/drivers/pci/pcie/aspm.c
44917+++ b/drivers/pci/pcie/aspm.c
44918@@ -27,9 +27,9 @@
44919 #define MODULE_PARAM_PREFIX "pcie_aspm."
44920
44921 /* Note: those are not register definitions */
44922-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
44923-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
44924-#define ASPM_STATE_L1 (4) /* L1 state */
44925+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
44926+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
44927+#define ASPM_STATE_L1 (4U) /* L1 state */
44928 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
44929 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
44930
44931diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
44932index ea37072..10e58e56 100644
44933--- a/drivers/pci/probe.c
44934+++ b/drivers/pci/probe.c
44935@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
44936 struct pci_bus_region region;
44937 bool bar_too_big = false, bar_disabled = false;
44938
44939- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
44940+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
44941
44942 /* No printks while decoding is disabled! */
44943 if (!dev->mmio_always_on) {
44944diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
44945index 0812608..b04018c4 100644
44946--- a/drivers/pci/proc.c
44947+++ b/drivers/pci/proc.c
44948@@ -453,7 +453,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
44949 static int __init pci_proc_init(void)
44950 {
44951 struct pci_dev *dev = NULL;
44952+
44953+#ifdef CONFIG_GRKERNSEC_PROC_ADD
44954+#ifdef CONFIG_GRKERNSEC_PROC_USER
44955+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
44956+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44957+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
44958+#endif
44959+#else
44960 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
44961+#endif
44962 proc_create("devices", 0, proc_bus_pci_dir,
44963 &proc_bus_pci_dev_operations);
44964 proc_initialized = 1;
44965diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
44966index 3e5b4497..dcdfb70 100644
44967--- a/drivers/platform/x86/chromeos_laptop.c
44968+++ b/drivers/platform/x86/chromeos_laptop.c
44969@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
44970 return 0;
44971 }
44972
44973-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
44974+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
44975 {
44976 .ident = "Samsung Series 5 550 - Touchpad",
44977 .matches = {
44978diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
44979index 6b22938..bc9700e 100644
44980--- a/drivers/platform/x86/msi-laptop.c
44981+++ b/drivers/platform/x86/msi-laptop.c
44982@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
44983
44984 if (!quirks->ec_read_only) {
44985 /* allow userland write sysfs file */
44986- dev_attr_bluetooth.store = store_bluetooth;
44987- dev_attr_wlan.store = store_wlan;
44988- dev_attr_threeg.store = store_threeg;
44989- dev_attr_bluetooth.attr.mode |= S_IWUSR;
44990- dev_attr_wlan.attr.mode |= S_IWUSR;
44991- dev_attr_threeg.attr.mode |= S_IWUSR;
44992+ pax_open_kernel();
44993+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
44994+ *(void **)&dev_attr_wlan.store = store_wlan;
44995+ *(void **)&dev_attr_threeg.store = store_threeg;
44996+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
44997+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
44998+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
44999+ pax_close_kernel();
45000 }
45001
45002 /* disable hardware control by fn key */
45003diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
45004index 2ac045f..39c443d 100644
45005--- a/drivers/platform/x86/sony-laptop.c
45006+++ b/drivers/platform/x86/sony-laptop.c
45007@@ -2483,7 +2483,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
45008 }
45009
45010 /* High speed charging function */
45011-static struct device_attribute *hsc_handle;
45012+static device_attribute_no_const *hsc_handle;
45013
45014 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
45015 struct device_attribute *attr,
45016diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
45017index 54d31c0..3f896d3 100644
45018--- a/drivers/platform/x86/thinkpad_acpi.c
45019+++ b/drivers/platform/x86/thinkpad_acpi.c
45020@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
45021 return 0;
45022 }
45023
45024-void static hotkey_mask_warn_incomplete_mask(void)
45025+static void hotkey_mask_warn_incomplete_mask(void)
45026 {
45027 /* log only what the user can fix... */
45028 const u32 wantedmask = hotkey_driver_mask &
45029@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
45030 }
45031 }
45032
45033-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45034- struct tp_nvram_state *newn,
45035- const u32 event_mask)
45036-{
45037-
45038 #define TPACPI_COMPARE_KEY(__scancode, __member) \
45039 do { \
45040 if ((event_mask & (1 << __scancode)) && \
45041@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45042 tpacpi_hotkey_send_key(__scancode); \
45043 } while (0)
45044
45045- void issue_volchange(const unsigned int oldvol,
45046- const unsigned int newvol)
45047- {
45048- unsigned int i = oldvol;
45049+static void issue_volchange(const unsigned int oldvol,
45050+ const unsigned int newvol,
45051+ const u32 event_mask)
45052+{
45053+ unsigned int i = oldvol;
45054
45055- while (i > newvol) {
45056- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
45057- i--;
45058- }
45059- while (i < newvol) {
45060- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45061- i++;
45062- }
45063+ while (i > newvol) {
45064+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
45065+ i--;
45066 }
45067+ while (i < newvol) {
45068+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45069+ i++;
45070+ }
45071+}
45072
45073- void issue_brightnesschange(const unsigned int oldbrt,
45074- const unsigned int newbrt)
45075- {
45076- unsigned int i = oldbrt;
45077+static void issue_brightnesschange(const unsigned int oldbrt,
45078+ const unsigned int newbrt,
45079+ const u32 event_mask)
45080+{
45081+ unsigned int i = oldbrt;
45082
45083- while (i > newbrt) {
45084- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
45085- i--;
45086- }
45087- while (i < newbrt) {
45088- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45089- i++;
45090- }
45091+ while (i > newbrt) {
45092+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
45093+ i--;
45094+ }
45095+ while (i < newbrt) {
45096+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45097+ i++;
45098 }
45099+}
45100
45101+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45102+ struct tp_nvram_state *newn,
45103+ const u32 event_mask)
45104+{
45105 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
45106 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
45107 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
45108@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45109 oldn->volume_level != newn->volume_level) {
45110 /* recently muted, or repeated mute keypress, or
45111 * multiple presses ending in mute */
45112- issue_volchange(oldn->volume_level, newn->volume_level);
45113+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
45114 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
45115 }
45116 } else {
45117@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45118 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45119 }
45120 if (oldn->volume_level != newn->volume_level) {
45121- issue_volchange(oldn->volume_level, newn->volume_level);
45122+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
45123 } else if (oldn->volume_toggle != newn->volume_toggle) {
45124 /* repeated vol up/down keypress at end of scale ? */
45125 if (newn->volume_level == 0)
45126@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45127 /* handle brightness */
45128 if (oldn->brightness_level != newn->brightness_level) {
45129 issue_brightnesschange(oldn->brightness_level,
45130- newn->brightness_level);
45131+ newn->brightness_level,
45132+ event_mask);
45133 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
45134 /* repeated key presses that didn't change state */
45135 if (newn->brightness_level == 0)
45136@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45137 && !tp_features.bright_unkfw)
45138 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45139 }
45140+}
45141
45142 #undef TPACPI_COMPARE_KEY
45143 #undef TPACPI_MAY_SEND_KEY
45144-}
45145
45146 /*
45147 * Polling driver
45148diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
45149index 769d265..a3a05ca 100644
45150--- a/drivers/pnp/pnpbios/bioscalls.c
45151+++ b/drivers/pnp/pnpbios/bioscalls.c
45152@@ -58,7 +58,7 @@ do { \
45153 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
45154 } while(0)
45155
45156-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
45157+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
45158 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
45159
45160 /*
45161@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
45162
45163 cpu = get_cpu();
45164 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
45165+
45166+ pax_open_kernel();
45167 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
45168+ pax_close_kernel();
45169
45170 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
45171 spin_lock_irqsave(&pnp_bios_lock, flags);
45172@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
45173 :"memory");
45174 spin_unlock_irqrestore(&pnp_bios_lock, flags);
45175
45176+ pax_open_kernel();
45177 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
45178+ pax_close_kernel();
45179+
45180 put_cpu();
45181
45182 /* If we get here and this is set then the PnP BIOS faulted on us. */
45183@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
45184 return status;
45185 }
45186
45187-void pnpbios_calls_init(union pnp_bios_install_struct *header)
45188+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
45189 {
45190 int i;
45191
45192@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
45193 pnp_bios_callpoint.offset = header->fields.pm16offset;
45194 pnp_bios_callpoint.segment = PNP_CS16;
45195
45196+ pax_open_kernel();
45197+
45198 for_each_possible_cpu(i) {
45199 struct desc_struct *gdt = get_cpu_gdt_table(i);
45200 if (!gdt)
45201@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
45202 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
45203 (unsigned long)__va(header->fields.pm16dseg));
45204 }
45205+
45206+ pax_close_kernel();
45207 }
45208diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
45209index 3e6db1c..1fbbdae 100644
45210--- a/drivers/pnp/resource.c
45211+++ b/drivers/pnp/resource.c
45212@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
45213 return 1;
45214
45215 /* check if the resource is valid */
45216- if (*irq < 0 || *irq > 15)
45217+ if (*irq > 15)
45218 return 0;
45219
45220 /* check if the resource is reserved */
45221@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
45222 return 1;
45223
45224 /* check if the resource is valid */
45225- if (*dma < 0 || *dma == 4 || *dma > 7)
45226+ if (*dma == 4 || *dma > 7)
45227 return 0;
45228
45229 /* check if the resource is reserved */
45230diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
45231index 0c52e2a..3421ab7 100644
45232--- a/drivers/power/pda_power.c
45233+++ b/drivers/power/pda_power.c
45234@@ -37,7 +37,11 @@ static int polling;
45235
45236 #if IS_ENABLED(CONFIG_USB_PHY)
45237 static struct usb_phy *transceiver;
45238-static struct notifier_block otg_nb;
45239+static int otg_handle_notification(struct notifier_block *nb,
45240+ unsigned long event, void *unused);
45241+static struct notifier_block otg_nb = {
45242+ .notifier_call = otg_handle_notification
45243+};
45244 #endif
45245
45246 static struct regulator *ac_draw;
45247@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
45248
45249 #if IS_ENABLED(CONFIG_USB_PHY)
45250 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
45251- otg_nb.notifier_call = otg_handle_notification;
45252 ret = usb_register_notifier(transceiver, &otg_nb);
45253 if (ret) {
45254 dev_err(dev, "failure to register otg notifier\n");
45255diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
45256index cc439fd..8fa30df 100644
45257--- a/drivers/power/power_supply.h
45258+++ b/drivers/power/power_supply.h
45259@@ -16,12 +16,12 @@ struct power_supply;
45260
45261 #ifdef CONFIG_SYSFS
45262
45263-extern void power_supply_init_attrs(struct device_type *dev_type);
45264+extern void power_supply_init_attrs(void);
45265 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
45266
45267 #else
45268
45269-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
45270+static inline void power_supply_init_attrs(void) {}
45271 #define power_supply_uevent NULL
45272
45273 #endif /* CONFIG_SYSFS */
45274diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
45275index 1c517c3..ffa2f17 100644
45276--- a/drivers/power/power_supply_core.c
45277+++ b/drivers/power/power_supply_core.c
45278@@ -24,7 +24,10 @@
45279 struct class *power_supply_class;
45280 EXPORT_SYMBOL_GPL(power_supply_class);
45281
45282-static struct device_type power_supply_dev_type;
45283+extern const struct attribute_group *power_supply_attr_groups[];
45284+static struct device_type power_supply_dev_type = {
45285+ .groups = power_supply_attr_groups,
45286+};
45287
45288 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
45289 struct power_supply *supply)
45290@@ -554,7 +557,7 @@ static int __init power_supply_class_init(void)
45291 return PTR_ERR(power_supply_class);
45292
45293 power_supply_class->dev_uevent = power_supply_uevent;
45294- power_supply_init_attrs(&power_supply_dev_type);
45295+ power_supply_init_attrs();
45296
45297 return 0;
45298 }
45299diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
45300index 29178f7..c65f324 100644
45301--- a/drivers/power/power_supply_sysfs.c
45302+++ b/drivers/power/power_supply_sysfs.c
45303@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
45304 .is_visible = power_supply_attr_is_visible,
45305 };
45306
45307-static const struct attribute_group *power_supply_attr_groups[] = {
45308+const struct attribute_group *power_supply_attr_groups[] = {
45309 &power_supply_attr_group,
45310 NULL,
45311 };
45312
45313-void power_supply_init_attrs(struct device_type *dev_type)
45314+void power_supply_init_attrs(void)
45315 {
45316 int i;
45317
45318- dev_type->groups = power_supply_attr_groups;
45319-
45320 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
45321 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
45322 }
45323diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
45324index d428ef9..fdc0357 100644
45325--- a/drivers/regulator/max8660.c
45326+++ b/drivers/regulator/max8660.c
45327@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
45328 max8660->shadow_regs[MAX8660_OVER1] = 5;
45329 } else {
45330 /* Otherwise devices can be toggled via software */
45331- max8660_dcdc_ops.enable = max8660_dcdc_enable;
45332- max8660_dcdc_ops.disable = max8660_dcdc_disable;
45333+ pax_open_kernel();
45334+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
45335+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
45336+ pax_close_kernel();
45337 }
45338
45339 /*
45340diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
45341index adb1414..c13e0ce 100644
45342--- a/drivers/regulator/max8973-regulator.c
45343+++ b/drivers/regulator/max8973-regulator.c
45344@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
45345 if (!pdata->enable_ext_control) {
45346 max->desc.enable_reg = MAX8973_VOUT;
45347 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
45348- max8973_dcdc_ops.enable = regulator_enable_regmap;
45349- max8973_dcdc_ops.disable = regulator_disable_regmap;
45350- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
45351+ pax_open_kernel();
45352+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
45353+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
45354+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
45355+ pax_close_kernel();
45356 }
45357
45358 max->enable_external_control = pdata->enable_ext_control;
45359diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
45360index b716283..3cc4349 100644
45361--- a/drivers/regulator/mc13892-regulator.c
45362+++ b/drivers/regulator/mc13892-regulator.c
45363@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
45364 }
45365 mc13xxx_unlock(mc13892);
45366
45367- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
45368+ pax_open_kernel();
45369+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
45370 = mc13892_vcam_set_mode;
45371- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
45372+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
45373 = mc13892_vcam_get_mode;
45374+ pax_close_kernel();
45375
45376 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
45377 ARRAY_SIZE(mc13892_regulators));
45378diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
45379index f1cb706..4c7832a 100644
45380--- a/drivers/rtc/rtc-cmos.c
45381+++ b/drivers/rtc/rtc-cmos.c
45382@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
45383 hpet_rtc_timer_init();
45384
45385 /* export at least the first block of NVRAM */
45386- nvram.size = address_space - NVRAM_OFFSET;
45387+ pax_open_kernel();
45388+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
45389+ pax_close_kernel();
45390 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
45391 if (retval < 0) {
45392 dev_dbg(dev, "can't create nvram file? %d\n", retval);
45393diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
45394index d049393..bb20be0 100644
45395--- a/drivers/rtc/rtc-dev.c
45396+++ b/drivers/rtc/rtc-dev.c
45397@@ -16,6 +16,7 @@
45398 #include <linux/module.h>
45399 #include <linux/rtc.h>
45400 #include <linux/sched.h>
45401+#include <linux/grsecurity.h>
45402 #include "rtc-core.h"
45403
45404 static dev_t rtc_devt;
45405@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
45406 if (copy_from_user(&tm, uarg, sizeof(tm)))
45407 return -EFAULT;
45408
45409+ gr_log_timechange();
45410+
45411 return rtc_set_time(rtc, &tm);
45412
45413 case RTC_PIE_ON:
45414diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
45415index b53992a..776df84 100644
45416--- a/drivers/rtc/rtc-ds1307.c
45417+++ b/drivers/rtc/rtc-ds1307.c
45418@@ -107,7 +107,7 @@ struct ds1307 {
45419 u8 offset; /* register's offset */
45420 u8 regs[11];
45421 u16 nvram_offset;
45422- struct bin_attribute *nvram;
45423+ bin_attribute_no_const *nvram;
45424 enum ds_type type;
45425 unsigned long flags;
45426 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
45427diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
45428index 130f29a..6179d03 100644
45429--- a/drivers/rtc/rtc-m48t59.c
45430+++ b/drivers/rtc/rtc-m48t59.c
45431@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
45432 goto out;
45433 }
45434
45435- m48t59_nvram_attr.size = pdata->offset;
45436+ pax_open_kernel();
45437+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
45438+ pax_close_kernel();
45439
45440 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
45441 if (ret) {
45442diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
45443index e693af6..2e525b6 100644
45444--- a/drivers/scsi/bfa/bfa_fcpim.h
45445+++ b/drivers/scsi/bfa/bfa_fcpim.h
45446@@ -36,7 +36,7 @@ struct bfa_iotag_s {
45447
45448 struct bfa_itn_s {
45449 bfa_isr_func_t isr;
45450-};
45451+} __no_const;
45452
45453 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
45454 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
45455diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
45456index 23a90e7..9cf04ee 100644
45457--- a/drivers/scsi/bfa/bfa_ioc.h
45458+++ b/drivers/scsi/bfa/bfa_ioc.h
45459@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
45460 bfa_ioc_disable_cbfn_t disable_cbfn;
45461 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
45462 bfa_ioc_reset_cbfn_t reset_cbfn;
45463-};
45464+} __no_const;
45465
45466 /*
45467 * IOC event notification mechanism.
45468@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
45469 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
45470 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
45471 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
45472-};
45473+} __no_const;
45474
45475 /*
45476 * Queue element to wait for room in request queue. FIFO order is
45477diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
45478index df0c3c7..b00e1d0 100644
45479--- a/drivers/scsi/hosts.c
45480+++ b/drivers/scsi/hosts.c
45481@@ -42,7 +42,7 @@
45482 #include "scsi_logging.h"
45483
45484
45485-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
45486+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
45487
45488
45489 static void scsi_host_cls_release(struct device *dev)
45490@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
45491 * subtract one because we increment first then return, but we need to
45492 * know what the next host number was before increment
45493 */
45494- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
45495+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
45496 shost->dma_channel = 0xff;
45497
45498 /* These three are default values which can be overridden */
45499diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
45500index 7f4f790..b75b92a 100644
45501--- a/drivers/scsi/hpsa.c
45502+++ b/drivers/scsi/hpsa.c
45503@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
45504 unsigned long flags;
45505
45506 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
45507- return h->access.command_completed(h, q);
45508+ return h->access->command_completed(h, q);
45509
45510 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
45511 a = rq->head[rq->current_entry];
45512@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
45513 while (!list_empty(&h->reqQ)) {
45514 c = list_entry(h->reqQ.next, struct CommandList, list);
45515 /* can't do anything if fifo is full */
45516- if ((h->access.fifo_full(h))) {
45517+ if ((h->access->fifo_full(h))) {
45518 dev_warn(&h->pdev->dev, "fifo full\n");
45519 break;
45520 }
45521@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
45522
45523 /* Tell the controller execute command */
45524 spin_unlock_irqrestore(&h->lock, flags);
45525- h->access.submit_command(h, c);
45526+ h->access->submit_command(h, c);
45527 spin_lock_irqsave(&h->lock, flags);
45528 }
45529 spin_unlock_irqrestore(&h->lock, flags);
45530@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
45531
45532 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
45533 {
45534- return h->access.command_completed(h, q);
45535+ return h->access->command_completed(h, q);
45536 }
45537
45538 static inline bool interrupt_pending(struct ctlr_info *h)
45539 {
45540- return h->access.intr_pending(h);
45541+ return h->access->intr_pending(h);
45542 }
45543
45544 static inline long interrupt_not_for_us(struct ctlr_info *h)
45545 {
45546- return (h->access.intr_pending(h) == 0) ||
45547+ return (h->access->intr_pending(h) == 0) ||
45548 (h->interrupts_enabled == 0);
45549 }
45550
45551@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
45552 if (prod_index < 0)
45553 return -ENODEV;
45554 h->product_name = products[prod_index].product_name;
45555- h->access = *(products[prod_index].access);
45556+ h->access = products[prod_index].access;
45557
45558 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
45559 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
45560@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
45561
45562 assert_spin_locked(&lockup_detector_lock);
45563 remove_ctlr_from_lockup_detector_list(h);
45564- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45565+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45566 spin_lock_irqsave(&h->lock, flags);
45567 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
45568 spin_unlock_irqrestore(&h->lock, flags);
45569@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
45570 }
45571
45572 /* make sure the board interrupts are off */
45573- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45574+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45575
45576 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
45577 goto clean2;
45578@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
45579 * fake ones to scoop up any residual completions.
45580 */
45581 spin_lock_irqsave(&h->lock, flags);
45582- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45583+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45584 spin_unlock_irqrestore(&h->lock, flags);
45585 free_irqs(h);
45586 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
45587@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
45588 dev_info(&h->pdev->dev, "Board READY.\n");
45589 dev_info(&h->pdev->dev,
45590 "Waiting for stale completions to drain.\n");
45591- h->access.set_intr_mask(h, HPSA_INTR_ON);
45592+ h->access->set_intr_mask(h, HPSA_INTR_ON);
45593 msleep(10000);
45594- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45595+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45596
45597 rc = controller_reset_failed(h->cfgtable);
45598 if (rc)
45599@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
45600 }
45601
45602 /* Turn the interrupts on so we can service requests */
45603- h->access.set_intr_mask(h, HPSA_INTR_ON);
45604+ h->access->set_intr_mask(h, HPSA_INTR_ON);
45605
45606 hpsa_hba_inquiry(h);
45607 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
45608@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
45609 * To write all data in the battery backed cache to disks
45610 */
45611 hpsa_flush_cache(h);
45612- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45613+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45614 hpsa_free_irqs_and_disable_msix(h);
45615 }
45616
45617@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
45618 return;
45619 }
45620 /* Change the access methods to the performant access methods */
45621- h->access = SA5_performant_access;
45622+ h->access = &SA5_performant_access;
45623 h->transMethod = CFGTBL_Trans_Performant;
45624 }
45625
45626diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
45627index 9816479..c5d4e97 100644
45628--- a/drivers/scsi/hpsa.h
45629+++ b/drivers/scsi/hpsa.h
45630@@ -79,7 +79,7 @@ struct ctlr_info {
45631 unsigned int msix_vector;
45632 unsigned int msi_vector;
45633 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
45634- struct access_method access;
45635+ struct access_method *access;
45636
45637 /* queue and queue Info */
45638 struct list_head reqQ;
45639diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
45640index 8b928c6..9c76300 100644
45641--- a/drivers/scsi/libfc/fc_exch.c
45642+++ b/drivers/scsi/libfc/fc_exch.c
45643@@ -100,12 +100,12 @@ struct fc_exch_mgr {
45644 u16 pool_max_index;
45645
45646 struct {
45647- atomic_t no_free_exch;
45648- atomic_t no_free_exch_xid;
45649- atomic_t xid_not_found;
45650- atomic_t xid_busy;
45651- atomic_t seq_not_found;
45652- atomic_t non_bls_resp;
45653+ atomic_unchecked_t no_free_exch;
45654+ atomic_unchecked_t no_free_exch_xid;
45655+ atomic_unchecked_t xid_not_found;
45656+ atomic_unchecked_t xid_busy;
45657+ atomic_unchecked_t seq_not_found;
45658+ atomic_unchecked_t non_bls_resp;
45659 } stats;
45660 };
45661
45662@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
45663 /* allocate memory for exchange */
45664 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
45665 if (!ep) {
45666- atomic_inc(&mp->stats.no_free_exch);
45667+ atomic_inc_unchecked(&mp->stats.no_free_exch);
45668 goto out;
45669 }
45670 memset(ep, 0, sizeof(*ep));
45671@@ -797,7 +797,7 @@ out:
45672 return ep;
45673 err:
45674 spin_unlock_bh(&pool->lock);
45675- atomic_inc(&mp->stats.no_free_exch_xid);
45676+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
45677 mempool_free(ep, mp->ep_pool);
45678 return NULL;
45679 }
45680@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45681 xid = ntohs(fh->fh_ox_id); /* we originated exch */
45682 ep = fc_exch_find(mp, xid);
45683 if (!ep) {
45684- atomic_inc(&mp->stats.xid_not_found);
45685+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45686 reject = FC_RJT_OX_ID;
45687 goto out;
45688 }
45689@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45690 ep = fc_exch_find(mp, xid);
45691 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
45692 if (ep) {
45693- atomic_inc(&mp->stats.xid_busy);
45694+ atomic_inc_unchecked(&mp->stats.xid_busy);
45695 reject = FC_RJT_RX_ID;
45696 goto rel;
45697 }
45698@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45699 }
45700 xid = ep->xid; /* get our XID */
45701 } else if (!ep) {
45702- atomic_inc(&mp->stats.xid_not_found);
45703+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45704 reject = FC_RJT_RX_ID; /* XID not found */
45705 goto out;
45706 }
45707@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45708 } else {
45709 sp = &ep->seq;
45710 if (sp->id != fh->fh_seq_id) {
45711- atomic_inc(&mp->stats.seq_not_found);
45712+ atomic_inc_unchecked(&mp->stats.seq_not_found);
45713 if (f_ctl & FC_FC_END_SEQ) {
45714 /*
45715 * Update sequence_id based on incoming last
45716@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45717
45718 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
45719 if (!ep) {
45720- atomic_inc(&mp->stats.xid_not_found);
45721+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45722 goto out;
45723 }
45724 if (ep->esb_stat & ESB_ST_COMPLETE) {
45725- atomic_inc(&mp->stats.xid_not_found);
45726+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45727 goto rel;
45728 }
45729 if (ep->rxid == FC_XID_UNKNOWN)
45730 ep->rxid = ntohs(fh->fh_rx_id);
45731 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
45732- atomic_inc(&mp->stats.xid_not_found);
45733+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45734 goto rel;
45735 }
45736 if (ep->did != ntoh24(fh->fh_s_id) &&
45737 ep->did != FC_FID_FLOGI) {
45738- atomic_inc(&mp->stats.xid_not_found);
45739+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45740 goto rel;
45741 }
45742 sof = fr_sof(fp);
45743@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45744 sp->ssb_stat |= SSB_ST_RESP;
45745 sp->id = fh->fh_seq_id;
45746 } else if (sp->id != fh->fh_seq_id) {
45747- atomic_inc(&mp->stats.seq_not_found);
45748+ atomic_inc_unchecked(&mp->stats.seq_not_found);
45749 goto rel;
45750 }
45751
45752@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45753 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
45754
45755 if (!sp)
45756- atomic_inc(&mp->stats.xid_not_found);
45757+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45758 else
45759- atomic_inc(&mp->stats.non_bls_resp);
45760+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
45761
45762 fc_frame_free(fp);
45763 }
45764@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
45765
45766 list_for_each_entry(ema, &lport->ema_list, ema_list) {
45767 mp = ema->mp;
45768- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
45769+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
45770 st->fc_no_free_exch_xid +=
45771- atomic_read(&mp->stats.no_free_exch_xid);
45772- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
45773- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
45774- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
45775- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
45776+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
45777+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
45778+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
45779+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
45780+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
45781 }
45782 }
45783 EXPORT_SYMBOL(fc_exch_update_stats);
45784diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
45785index 161c98e..6d563b3 100644
45786--- a/drivers/scsi/libsas/sas_ata.c
45787+++ b/drivers/scsi/libsas/sas_ata.c
45788@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
45789 .postreset = ata_std_postreset,
45790 .error_handler = ata_std_error_handler,
45791 .post_internal_cmd = sas_ata_post_internal,
45792- .qc_defer = ata_std_qc_defer,
45793+ .qc_defer = ata_std_qc_defer,
45794 .qc_prep = ata_noop_qc_prep,
45795 .qc_issue = sas_ata_qc_issue,
45796 .qc_fill_rtf = sas_ata_qc_fill_rtf,
45797diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
45798index bcc56ca..6f4174a 100644
45799--- a/drivers/scsi/lpfc/lpfc.h
45800+++ b/drivers/scsi/lpfc/lpfc.h
45801@@ -431,7 +431,7 @@ struct lpfc_vport {
45802 struct dentry *debug_nodelist;
45803 struct dentry *vport_debugfs_root;
45804 struct lpfc_debugfs_trc *disc_trc;
45805- atomic_t disc_trc_cnt;
45806+ atomic_unchecked_t disc_trc_cnt;
45807 #endif
45808 uint8_t stat_data_enabled;
45809 uint8_t stat_data_blocked;
45810@@ -865,8 +865,8 @@ struct lpfc_hba {
45811 struct timer_list fabric_block_timer;
45812 unsigned long bit_flags;
45813 #define FABRIC_COMANDS_BLOCKED 0
45814- atomic_t num_rsrc_err;
45815- atomic_t num_cmd_success;
45816+ atomic_unchecked_t num_rsrc_err;
45817+ atomic_unchecked_t num_cmd_success;
45818 unsigned long last_rsrc_error_time;
45819 unsigned long last_ramp_down_time;
45820 unsigned long last_ramp_up_time;
45821@@ -902,7 +902,7 @@ struct lpfc_hba {
45822
45823 struct dentry *debug_slow_ring_trc;
45824 struct lpfc_debugfs_trc *slow_ring_trc;
45825- atomic_t slow_ring_trc_cnt;
45826+ atomic_unchecked_t slow_ring_trc_cnt;
45827 /* iDiag debugfs sub-directory */
45828 struct dentry *idiag_root;
45829 struct dentry *idiag_pci_cfg;
45830diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
45831index f525ecb..32549a4 100644
45832--- a/drivers/scsi/lpfc/lpfc_debugfs.c
45833+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
45834@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
45835
45836 #include <linux/debugfs.h>
45837
45838-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
45839+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
45840 static unsigned long lpfc_debugfs_start_time = 0L;
45841
45842 /* iDiag */
45843@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
45844 lpfc_debugfs_enable = 0;
45845
45846 len = 0;
45847- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
45848+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
45849 (lpfc_debugfs_max_disc_trc - 1);
45850 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
45851 dtp = vport->disc_trc + i;
45852@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
45853 lpfc_debugfs_enable = 0;
45854
45855 len = 0;
45856- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
45857+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
45858 (lpfc_debugfs_max_slow_ring_trc - 1);
45859 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
45860 dtp = phba->slow_ring_trc + i;
45861@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
45862 !vport || !vport->disc_trc)
45863 return;
45864
45865- index = atomic_inc_return(&vport->disc_trc_cnt) &
45866+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
45867 (lpfc_debugfs_max_disc_trc - 1);
45868 dtp = vport->disc_trc + index;
45869 dtp->fmt = fmt;
45870 dtp->data1 = data1;
45871 dtp->data2 = data2;
45872 dtp->data3 = data3;
45873- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
45874+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
45875 dtp->jif = jiffies;
45876 #endif
45877 return;
45878@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
45879 !phba || !phba->slow_ring_trc)
45880 return;
45881
45882- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
45883+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
45884 (lpfc_debugfs_max_slow_ring_trc - 1);
45885 dtp = phba->slow_ring_trc + index;
45886 dtp->fmt = fmt;
45887 dtp->data1 = data1;
45888 dtp->data2 = data2;
45889 dtp->data3 = data3;
45890- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
45891+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
45892 dtp->jif = jiffies;
45893 #endif
45894 return;
45895@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
45896 "slow_ring buffer\n");
45897 goto debug_failed;
45898 }
45899- atomic_set(&phba->slow_ring_trc_cnt, 0);
45900+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
45901 memset(phba->slow_ring_trc, 0,
45902 (sizeof(struct lpfc_debugfs_trc) *
45903 lpfc_debugfs_max_slow_ring_trc));
45904@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
45905 "buffer\n");
45906 goto debug_failed;
45907 }
45908- atomic_set(&vport->disc_trc_cnt, 0);
45909+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
45910
45911 snprintf(name, sizeof(name), "discovery_trace");
45912 vport->debug_disc_trc =
45913diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
45914index cb465b2..2e7b25f 100644
45915--- a/drivers/scsi/lpfc/lpfc_init.c
45916+++ b/drivers/scsi/lpfc/lpfc_init.c
45917@@ -10950,8 +10950,10 @@ lpfc_init(void)
45918 "misc_register returned with status %d", error);
45919
45920 if (lpfc_enable_npiv) {
45921- lpfc_transport_functions.vport_create = lpfc_vport_create;
45922- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
45923+ pax_open_kernel();
45924+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
45925+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
45926+ pax_close_kernel();
45927 }
45928 lpfc_transport_template =
45929 fc_attach_transport(&lpfc_transport_functions);
45930diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
45931index 8523b278e..ce1d812 100644
45932--- a/drivers/scsi/lpfc/lpfc_scsi.c
45933+++ b/drivers/scsi/lpfc/lpfc_scsi.c
45934@@ -331,7 +331,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
45935 uint32_t evt_posted;
45936
45937 spin_lock_irqsave(&phba->hbalock, flags);
45938- atomic_inc(&phba->num_rsrc_err);
45939+ atomic_inc_unchecked(&phba->num_rsrc_err);
45940 phba->last_rsrc_error_time = jiffies;
45941
45942 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
45943@@ -372,7 +372,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
45944 unsigned long flags;
45945 struct lpfc_hba *phba = vport->phba;
45946 uint32_t evt_posted;
45947- atomic_inc(&phba->num_cmd_success);
45948+ atomic_inc_unchecked(&phba->num_cmd_success);
45949
45950 if (vport->cfg_lun_queue_depth <= queue_depth)
45951 return;
45952@@ -416,8 +416,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
45953 unsigned long num_rsrc_err, num_cmd_success;
45954 int i;
45955
45956- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
45957- num_cmd_success = atomic_read(&phba->num_cmd_success);
45958+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
45959+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
45960
45961 /*
45962 * The error and success command counters are global per
45963@@ -445,8 +445,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
45964 }
45965 }
45966 lpfc_destroy_vport_work_array(phba, vports);
45967- atomic_set(&phba->num_rsrc_err, 0);
45968- atomic_set(&phba->num_cmd_success, 0);
45969+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
45970+ atomic_set_unchecked(&phba->num_cmd_success, 0);
45971 }
45972
45973 /**
45974@@ -480,8 +480,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
45975 }
45976 }
45977 lpfc_destroy_vport_work_array(phba, vports);
45978- atomic_set(&phba->num_rsrc_err, 0);
45979- atomic_set(&phba->num_cmd_success, 0);
45980+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
45981+ atomic_set_unchecked(&phba->num_cmd_success, 0);
45982 }
45983
45984 /**
45985diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
45986index 8e1b737..50ff510 100644
45987--- a/drivers/scsi/pmcraid.c
45988+++ b/drivers/scsi/pmcraid.c
45989@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
45990 res->scsi_dev = scsi_dev;
45991 scsi_dev->hostdata = res;
45992 res->change_detected = 0;
45993- atomic_set(&res->read_failures, 0);
45994- atomic_set(&res->write_failures, 0);
45995+ atomic_set_unchecked(&res->read_failures, 0);
45996+ atomic_set_unchecked(&res->write_failures, 0);
45997 rc = 0;
45998 }
45999 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
46000@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
46001
46002 /* If this was a SCSI read/write command keep count of errors */
46003 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
46004- atomic_inc(&res->read_failures);
46005+ atomic_inc_unchecked(&res->read_failures);
46006 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
46007- atomic_inc(&res->write_failures);
46008+ atomic_inc_unchecked(&res->write_failures);
46009
46010 if (!RES_IS_GSCSI(res->cfg_entry) &&
46011 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
46012@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
46013 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
46014 * hrrq_id assigned here in queuecommand
46015 */
46016- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
46017+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
46018 pinstance->num_hrrq;
46019 cmd->cmd_done = pmcraid_io_done;
46020
46021@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
46022 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
46023 * hrrq_id assigned here in queuecommand
46024 */
46025- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
46026+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
46027 pinstance->num_hrrq;
46028
46029 if (request_size) {
46030@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
46031
46032 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
46033 /* add resources only after host is added into system */
46034- if (!atomic_read(&pinstance->expose_resources))
46035+ if (!atomic_read_unchecked(&pinstance->expose_resources))
46036 return;
46037
46038 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
46039@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
46040 init_waitqueue_head(&pinstance->reset_wait_q);
46041
46042 atomic_set(&pinstance->outstanding_cmds, 0);
46043- atomic_set(&pinstance->last_message_id, 0);
46044- atomic_set(&pinstance->expose_resources, 0);
46045+ atomic_set_unchecked(&pinstance->last_message_id, 0);
46046+ atomic_set_unchecked(&pinstance->expose_resources, 0);
46047
46048 INIT_LIST_HEAD(&pinstance->free_res_q);
46049 INIT_LIST_HEAD(&pinstance->used_res_q);
46050@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
46051 /* Schedule worker thread to handle CCN and take care of adding and
46052 * removing devices to OS
46053 */
46054- atomic_set(&pinstance->expose_resources, 1);
46055+ atomic_set_unchecked(&pinstance->expose_resources, 1);
46056 schedule_work(&pinstance->worker_q);
46057 return rc;
46058
46059diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
46060index e1d150f..6c6df44 100644
46061--- a/drivers/scsi/pmcraid.h
46062+++ b/drivers/scsi/pmcraid.h
46063@@ -748,7 +748,7 @@ struct pmcraid_instance {
46064 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
46065
46066 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
46067- atomic_t last_message_id;
46068+ atomic_unchecked_t last_message_id;
46069
46070 /* configuration table */
46071 struct pmcraid_config_table *cfg_table;
46072@@ -777,7 +777,7 @@ struct pmcraid_instance {
46073 atomic_t outstanding_cmds;
46074
46075 /* should add/delete resources to mid-layer now ?*/
46076- atomic_t expose_resources;
46077+ atomic_unchecked_t expose_resources;
46078
46079
46080
46081@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
46082 struct pmcraid_config_table_entry_ext cfg_entry_ext;
46083 };
46084 struct scsi_device *scsi_dev; /* Link scsi_device structure */
46085- atomic_t read_failures; /* count of failed READ commands */
46086- atomic_t write_failures; /* count of failed WRITE commands */
46087+ atomic_unchecked_t read_failures; /* count of failed READ commands */
46088+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
46089
46090 /* To indicate add/delete/modify during CCN */
46091 u8 change_detected;
46092diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
46093index bf60c63..74d4dce 100644
46094--- a/drivers/scsi/qla2xxx/qla_attr.c
46095+++ b/drivers/scsi/qla2xxx/qla_attr.c
46096@@ -2001,7 +2001,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
46097 return 0;
46098 }
46099
46100-struct fc_function_template qla2xxx_transport_functions = {
46101+fc_function_template_no_const qla2xxx_transport_functions = {
46102
46103 .show_host_node_name = 1,
46104 .show_host_port_name = 1,
46105@@ -2048,7 +2048,7 @@ struct fc_function_template qla2xxx_transport_functions = {
46106 .bsg_timeout = qla24xx_bsg_timeout,
46107 };
46108
46109-struct fc_function_template qla2xxx_transport_vport_functions = {
46110+fc_function_template_no_const qla2xxx_transport_vport_functions = {
46111
46112 .show_host_node_name = 1,
46113 .show_host_port_name = 1,
46114diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
46115index 026bfde..90c4018 100644
46116--- a/drivers/scsi/qla2xxx/qla_gbl.h
46117+++ b/drivers/scsi/qla2xxx/qla_gbl.h
46118@@ -528,8 +528,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
46119 struct device_attribute;
46120 extern struct device_attribute *qla2x00_host_attrs[];
46121 struct fc_function_template;
46122-extern struct fc_function_template qla2xxx_transport_functions;
46123-extern struct fc_function_template qla2xxx_transport_vport_functions;
46124+extern fc_function_template_no_const qla2xxx_transport_functions;
46125+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
46126 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
46127 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
46128 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
46129diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
46130index ad72c1d..afc9a98 100644
46131--- a/drivers/scsi/qla2xxx/qla_os.c
46132+++ b/drivers/scsi/qla2xxx/qla_os.c
46133@@ -1571,8 +1571,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
46134 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
46135 /* Ok, a 64bit DMA mask is applicable. */
46136 ha->flags.enable_64bit_addressing = 1;
46137- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
46138- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
46139+ pax_open_kernel();
46140+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
46141+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
46142+ pax_close_kernel();
46143 return;
46144 }
46145 }
46146diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
46147index ddf16a8..80f4dd0 100644
46148--- a/drivers/scsi/qla4xxx/ql4_def.h
46149+++ b/drivers/scsi/qla4xxx/ql4_def.h
46150@@ -291,7 +291,7 @@ struct ddb_entry {
46151 * (4000 only) */
46152 atomic_t relogin_timer; /* Max Time to wait for
46153 * relogin to complete */
46154- atomic_t relogin_retry_count; /* Num of times relogin has been
46155+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
46156 * retried */
46157 uint32_t default_time2wait; /* Default Min time between
46158 * relogins (+aens) */
46159diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
46160index 4d231c1..2892c37 100644
46161--- a/drivers/scsi/qla4xxx/ql4_os.c
46162+++ b/drivers/scsi/qla4xxx/ql4_os.c
46163@@ -2971,12 +2971,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
46164 */
46165 if (!iscsi_is_session_online(cls_sess)) {
46166 /* Reset retry relogin timer */
46167- atomic_inc(&ddb_entry->relogin_retry_count);
46168+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
46169 DEBUG2(ql4_printk(KERN_INFO, ha,
46170 "%s: index[%d] relogin timed out-retrying"
46171 " relogin (%d), retry (%d)\n", __func__,
46172 ddb_entry->fw_ddb_index,
46173- atomic_read(&ddb_entry->relogin_retry_count),
46174+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
46175 ddb_entry->default_time2wait + 4));
46176 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
46177 atomic_set(&ddb_entry->retry_relogin_timer,
46178@@ -5081,7 +5081,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
46179
46180 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
46181 atomic_set(&ddb_entry->relogin_timer, 0);
46182- atomic_set(&ddb_entry->relogin_retry_count, 0);
46183+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
46184 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
46185 ddb_entry->default_relogin_timeout =
46186 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
46187diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
46188index eaa808e..95f8841 100644
46189--- a/drivers/scsi/scsi.c
46190+++ b/drivers/scsi/scsi.c
46191@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
46192 unsigned long timeout;
46193 int rtn = 0;
46194
46195- atomic_inc(&cmd->device->iorequest_cnt);
46196+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
46197
46198 /* check if the device is still usable */
46199 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
46200diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
46201index 86d5220..f22c51a 100644
46202--- a/drivers/scsi/scsi_lib.c
46203+++ b/drivers/scsi/scsi_lib.c
46204@@ -1458,7 +1458,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
46205 shost = sdev->host;
46206 scsi_init_cmd_errh(cmd);
46207 cmd->result = DID_NO_CONNECT << 16;
46208- atomic_inc(&cmd->device->iorequest_cnt);
46209+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
46210
46211 /*
46212 * SCSI request completion path will do scsi_device_unbusy(),
46213@@ -1484,9 +1484,9 @@ static void scsi_softirq_done(struct request *rq)
46214
46215 INIT_LIST_HEAD(&cmd->eh_entry);
46216
46217- atomic_inc(&cmd->device->iodone_cnt);
46218+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
46219 if (cmd->result)
46220- atomic_inc(&cmd->device->ioerr_cnt);
46221+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
46222
46223 disposition = scsi_decide_disposition(cmd);
46224 if (disposition != SUCCESS &&
46225diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
46226index 931a7d9..0c2a754 100644
46227--- a/drivers/scsi/scsi_sysfs.c
46228+++ b/drivers/scsi/scsi_sysfs.c
46229@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
46230 char *buf) \
46231 { \
46232 struct scsi_device *sdev = to_scsi_device(dev); \
46233- unsigned long long count = atomic_read(&sdev->field); \
46234+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
46235 return snprintf(buf, 20, "0x%llx\n", count); \
46236 } \
46237 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
46238diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
46239index 84a1fdf..693b0d6 100644
46240--- a/drivers/scsi/scsi_tgt_lib.c
46241+++ b/drivers/scsi/scsi_tgt_lib.c
46242@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
46243 int err;
46244
46245 dprintk("%lx %u\n", uaddr, len);
46246- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
46247+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
46248 if (err) {
46249 /*
46250 * TODO: need to fixup sg_tablesize, max_segment_size,
46251diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
46252index e106c27..11a380e 100644
46253--- a/drivers/scsi/scsi_transport_fc.c
46254+++ b/drivers/scsi/scsi_transport_fc.c
46255@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
46256 * Netlink Infrastructure
46257 */
46258
46259-static atomic_t fc_event_seq;
46260+static atomic_unchecked_t fc_event_seq;
46261
46262 /**
46263 * fc_get_event_number - Obtain the next sequential FC event number
46264@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
46265 u32
46266 fc_get_event_number(void)
46267 {
46268- return atomic_add_return(1, &fc_event_seq);
46269+ return atomic_add_return_unchecked(1, &fc_event_seq);
46270 }
46271 EXPORT_SYMBOL(fc_get_event_number);
46272
46273@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
46274 {
46275 int error;
46276
46277- atomic_set(&fc_event_seq, 0);
46278+ atomic_set_unchecked(&fc_event_seq, 0);
46279
46280 error = transport_class_register(&fc_host_class);
46281 if (error)
46282@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
46283 char *cp;
46284
46285 *val = simple_strtoul(buf, &cp, 0);
46286- if ((*cp && (*cp != '\n')) || (*val < 0))
46287+ if (*cp && (*cp != '\n'))
46288 return -EINVAL;
46289 /*
46290 * Check for overflow; dev_loss_tmo is u32
46291diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
46292index 133926b..903000d 100644
46293--- a/drivers/scsi/scsi_transport_iscsi.c
46294+++ b/drivers/scsi/scsi_transport_iscsi.c
46295@@ -80,7 +80,7 @@ struct iscsi_internal {
46296 struct transport_container session_cont;
46297 };
46298
46299-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
46300+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
46301 static struct workqueue_struct *iscsi_eh_timer_workq;
46302
46303 static DEFINE_IDA(iscsi_sess_ida);
46304@@ -1738,7 +1738,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
46305 int err;
46306
46307 ihost = shost->shost_data;
46308- session->sid = atomic_add_return(1, &iscsi_session_nr);
46309+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
46310
46311 if (target_id == ISCSI_MAX_TARGET) {
46312 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
46313@@ -3944,7 +3944,7 @@ static __init int iscsi_transport_init(void)
46314 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
46315 ISCSI_TRANSPORT_VERSION);
46316
46317- atomic_set(&iscsi_session_nr, 0);
46318+ atomic_set_unchecked(&iscsi_session_nr, 0);
46319
46320 err = class_register(&iscsi_transport_class);
46321 if (err)
46322diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
46323index f379c7f..e8fc69c 100644
46324--- a/drivers/scsi/scsi_transport_srp.c
46325+++ b/drivers/scsi/scsi_transport_srp.c
46326@@ -33,7 +33,7 @@
46327 #include "scsi_transport_srp_internal.h"
46328
46329 struct srp_host_attrs {
46330- atomic_t next_port_id;
46331+ atomic_unchecked_t next_port_id;
46332 };
46333 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
46334
46335@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
46336 struct Scsi_Host *shost = dev_to_shost(dev);
46337 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
46338
46339- atomic_set(&srp_host->next_port_id, 0);
46340+ atomic_set_unchecked(&srp_host->next_port_id, 0);
46341 return 0;
46342 }
46343
46344@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
46345 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
46346 rport->roles = ids->roles;
46347
46348- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
46349+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
46350 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
46351
46352 transport_setup_device(&rport->dev);
46353diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
46354index 610417e..1544fa9 100644
46355--- a/drivers/scsi/sd.c
46356+++ b/drivers/scsi/sd.c
46357@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
46358 sdkp->disk = gd;
46359 sdkp->index = index;
46360 atomic_set(&sdkp->openers, 0);
46361- atomic_set(&sdkp->device->ioerr_cnt, 0);
46362+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
46363
46364 if (!sdp->request_queue->rq_timeout) {
46365 if (sdp->type != TYPE_MOD)
46366diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
46367index df5e961..df6b97f 100644
46368--- a/drivers/scsi/sg.c
46369+++ b/drivers/scsi/sg.c
46370@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
46371 sdp->disk->disk_name,
46372 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
46373 NULL,
46374- (char *)arg);
46375+ (char __user *)arg);
46376 case BLKTRACESTART:
46377 return blk_trace_startstop(sdp->device->request_queue, 1);
46378 case BLKTRACESTOP:
46379diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
46380index 32b7bb1..2f1c4bd 100644
46381--- a/drivers/spi/spi.c
46382+++ b/drivers/spi/spi.c
46383@@ -1631,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
46384 EXPORT_SYMBOL_GPL(spi_bus_unlock);
46385
46386 /* portable code must never pass more than 32 bytes */
46387-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
46388+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
46389
46390 static u8 *buf;
46391
46392diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
46393index 3675020..e80d92c 100644
46394--- a/drivers/staging/media/solo6x10/solo6x10-core.c
46395+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
46396@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
46397
46398 static int solo_sysfs_init(struct solo_dev *solo_dev)
46399 {
46400- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
46401+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
46402 struct device *dev = &solo_dev->dev;
46403 const char *driver;
46404 int i;
46405diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
46406index 34afc16..ffe44dd 100644
46407--- a/drivers/staging/octeon/ethernet-rx.c
46408+++ b/drivers/staging/octeon/ethernet-rx.c
46409@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
46410 /* Increment RX stats for virtual ports */
46411 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
46412 #ifdef CONFIG_64BIT
46413- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
46414- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
46415+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
46416+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
46417 #else
46418- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
46419- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
46420+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
46421+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
46422 #endif
46423 }
46424 netif_receive_skb(skb);
46425@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
46426 dev->name);
46427 */
46428 #ifdef CONFIG_64BIT
46429- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
46430+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
46431 #else
46432- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
46433+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
46434 #endif
46435 dev_kfree_skb_irq(skb);
46436 }
46437diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
46438index c3a90e7..023619a 100644
46439--- a/drivers/staging/octeon/ethernet.c
46440+++ b/drivers/staging/octeon/ethernet.c
46441@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
46442 * since the RX tasklet also increments it.
46443 */
46444 #ifdef CONFIG_64BIT
46445- atomic64_add(rx_status.dropped_packets,
46446- (atomic64_t *)&priv->stats.rx_dropped);
46447+ atomic64_add_unchecked(rx_status.dropped_packets,
46448+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
46449 #else
46450- atomic_add(rx_status.dropped_packets,
46451- (atomic_t *)&priv->stats.rx_dropped);
46452+ atomic_add_unchecked(rx_status.dropped_packets,
46453+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
46454 #endif
46455 }
46456
46457diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
46458index dc23395..cf7e9b1 100644
46459--- a/drivers/staging/rtl8712/rtl871x_io.h
46460+++ b/drivers/staging/rtl8712/rtl871x_io.h
46461@@ -108,7 +108,7 @@ struct _io_ops {
46462 u8 *pmem);
46463 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
46464 u8 *pmem);
46465-};
46466+} __no_const;
46467
46468 struct io_req {
46469 struct list_head list;
46470diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
46471index 1f5088b..0e59820 100644
46472--- a/drivers/staging/sbe-2t3e3/netdev.c
46473+++ b/drivers/staging/sbe-2t3e3/netdev.c
46474@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46475 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
46476
46477 if (rlen)
46478- if (copy_to_user(data, &resp, rlen))
46479+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
46480 return -EFAULT;
46481
46482 return 0;
46483diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
46484index a863a98..d272795 100644
46485--- a/drivers/staging/usbip/vhci.h
46486+++ b/drivers/staging/usbip/vhci.h
46487@@ -83,7 +83,7 @@ struct vhci_hcd {
46488 unsigned resuming:1;
46489 unsigned long re_timeout;
46490
46491- atomic_t seqnum;
46492+ atomic_unchecked_t seqnum;
46493
46494 /*
46495 * NOTE:
46496diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
46497index d7974cb..d78076b 100644
46498--- a/drivers/staging/usbip/vhci_hcd.c
46499+++ b/drivers/staging/usbip/vhci_hcd.c
46500@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
46501
46502 spin_lock(&vdev->priv_lock);
46503
46504- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
46505+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
46506 if (priv->seqnum == 0xffff)
46507 dev_info(&urb->dev->dev, "seqnum max\n");
46508
46509@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
46510 return -ENOMEM;
46511 }
46512
46513- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
46514+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
46515 if (unlink->seqnum == 0xffff)
46516 pr_info("seqnum max\n");
46517
46518@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
46519 vdev->rhport = rhport;
46520 }
46521
46522- atomic_set(&vhci->seqnum, 0);
46523+ atomic_set_unchecked(&vhci->seqnum, 0);
46524 spin_lock_init(&vhci->lock);
46525
46526 hcd->power_budget = 0; /* no limit */
46527diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
46528index d07fcb5..358e1e1 100644
46529--- a/drivers/staging/usbip/vhci_rx.c
46530+++ b/drivers/staging/usbip/vhci_rx.c
46531@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
46532 if (!urb) {
46533 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
46534 pr_info("max seqnum %d\n",
46535- atomic_read(&the_controller->seqnum));
46536+ atomic_read_unchecked(&the_controller->seqnum));
46537 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
46538 return;
46539 }
46540diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
46541index 8417c2f..ef5ebd6 100644
46542--- a/drivers/staging/vt6655/hostap.c
46543+++ b/drivers/staging/vt6655/hostap.c
46544@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
46545 *
46546 */
46547
46548+static net_device_ops_no_const apdev_netdev_ops;
46549+
46550 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
46551 {
46552 PSDevice apdev_priv;
46553 struct net_device *dev = pDevice->dev;
46554 int ret;
46555- const struct net_device_ops apdev_netdev_ops = {
46556- .ndo_start_xmit = pDevice->tx_80211,
46557- };
46558
46559 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
46560
46561@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
46562 *apdev_priv = *pDevice;
46563 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
46564
46565+ /* only half broken now */
46566+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
46567 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
46568
46569 pDevice->apdev->type = ARPHRD_IEEE80211;
46570diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
46571index c699a30..b90a5fd 100644
46572--- a/drivers/staging/vt6656/hostap.c
46573+++ b/drivers/staging/vt6656/hostap.c
46574@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
46575 *
46576 */
46577
46578+static net_device_ops_no_const apdev_netdev_ops;
46579+
46580 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
46581 {
46582 struct vnt_private *apdev_priv;
46583 struct net_device *dev = pDevice->dev;
46584 int ret;
46585- const struct net_device_ops apdev_netdev_ops = {
46586- .ndo_start_xmit = pDevice->tx_80211,
46587- };
46588
46589 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
46590
46591@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
46592 *apdev_priv = *pDevice;
46593 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
46594
46595+ /* only half broken now */
46596+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
46597 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
46598
46599 pDevice->apdev->type = ARPHRD_IEEE80211;
46600diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
46601index d128ce2..fc1f9a1 100644
46602--- a/drivers/staging/zcache/tmem.h
46603+++ b/drivers/staging/zcache/tmem.h
46604@@ -225,7 +225,7 @@ struct tmem_pamops {
46605 bool (*is_remote)(void *);
46606 int (*replace_in_obj)(void *, struct tmem_obj *);
46607 #endif
46608-};
46609+} __no_const;
46610 extern void tmem_register_pamops(struct tmem_pamops *m);
46611
46612 /* memory allocation methods provided by the host implementation */
46613@@ -234,7 +234,7 @@ struct tmem_hostops {
46614 void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
46615 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
46616 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
46617-};
46618+} __no_const;
46619 extern void tmem_register_hostops(struct tmem_hostops *m);
46620
46621 /* core tmem accessor functions */
46622diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
46623index 4630481..c26782a 100644
46624--- a/drivers/target/target_core_device.c
46625+++ b/drivers/target/target_core_device.c
46626@@ -1400,7 +1400,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
46627 spin_lock_init(&dev->se_port_lock);
46628 spin_lock_init(&dev->se_tmr_lock);
46629 spin_lock_init(&dev->qf_cmd_lock);
46630- atomic_set(&dev->dev_ordered_id, 0);
46631+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
46632 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
46633 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
46634 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
46635diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
46636index 21e3158..43c6004 100644
46637--- a/drivers/target/target_core_transport.c
46638+++ b/drivers/target/target_core_transport.c
46639@@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
46640 * Used to determine when ORDERED commands should go from
46641 * Dormant to Active status.
46642 */
46643- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
46644+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
46645 smp_mb__after_atomic_inc();
46646 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
46647 cmd->se_ordered_id, cmd->sam_task_attr,
46648diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
46649index 33f83fe..d80f8e1 100644
46650--- a/drivers/tty/cyclades.c
46651+++ b/drivers/tty/cyclades.c
46652@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
46653 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
46654 info->port.count);
46655 #endif
46656- info->port.count++;
46657+ atomic_inc(&info->port.count);
46658 #ifdef CY_DEBUG_COUNT
46659 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
46660- current->pid, info->port.count);
46661+ current->pid, atomic_read(&info->port.count));
46662 #endif
46663
46664 /*
46665@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
46666 for (j = 0; j < cy_card[i].nports; j++) {
46667 info = &cy_card[i].ports[j];
46668
46669- if (info->port.count) {
46670+ if (atomic_read(&info->port.count)) {
46671 /* XXX is the ldisc num worth this? */
46672 struct tty_struct *tty;
46673 struct tty_ldisc *ld;
46674diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
46675index eb255e8..f637a57 100644
46676--- a/drivers/tty/hvc/hvc_console.c
46677+++ b/drivers/tty/hvc/hvc_console.c
46678@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
46679
46680 spin_lock_irqsave(&hp->port.lock, flags);
46681 /* Check and then increment for fast path open. */
46682- if (hp->port.count++ > 0) {
46683+ if (atomic_inc_return(&hp->port.count) > 1) {
46684 spin_unlock_irqrestore(&hp->port.lock, flags);
46685 hvc_kick();
46686 return 0;
46687@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
46688
46689 spin_lock_irqsave(&hp->port.lock, flags);
46690
46691- if (--hp->port.count == 0) {
46692+ if (atomic_dec_return(&hp->port.count) == 0) {
46693 spin_unlock_irqrestore(&hp->port.lock, flags);
46694 /* We are done with the tty pointer now. */
46695 tty_port_tty_set(&hp->port, NULL);
46696@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
46697 */
46698 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
46699 } else {
46700- if (hp->port.count < 0)
46701+ if (atomic_read(&hp->port.count) < 0)
46702 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
46703- hp->vtermno, hp->port.count);
46704+ hp->vtermno, atomic_read(&hp->port.count));
46705 spin_unlock_irqrestore(&hp->port.lock, flags);
46706 }
46707 }
46708@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
46709 * open->hangup case this can be called after the final close so prevent
46710 * that from happening for now.
46711 */
46712- if (hp->port.count <= 0) {
46713+ if (atomic_read(&hp->port.count) <= 0) {
46714 spin_unlock_irqrestore(&hp->port.lock, flags);
46715 return;
46716 }
46717
46718- hp->port.count = 0;
46719+ atomic_set(&hp->port.count, 0);
46720 spin_unlock_irqrestore(&hp->port.lock, flags);
46721 tty_port_tty_set(&hp->port, NULL);
46722
46723@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
46724 return -EPIPE;
46725
46726 /* FIXME what's this (unprotected) check for? */
46727- if (hp->port.count <= 0)
46728+ if (atomic_read(&hp->port.count) <= 0)
46729 return -EIO;
46730
46731 spin_lock_irqsave(&hp->lock, flags);
46732diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
46733index 81e939e..95ead10 100644
46734--- a/drivers/tty/hvc/hvcs.c
46735+++ b/drivers/tty/hvc/hvcs.c
46736@@ -83,6 +83,7 @@
46737 #include <asm/hvcserver.h>
46738 #include <asm/uaccess.h>
46739 #include <asm/vio.h>
46740+#include <asm/local.h>
46741
46742 /*
46743 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
46744@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
46745
46746 spin_lock_irqsave(&hvcsd->lock, flags);
46747
46748- if (hvcsd->port.count > 0) {
46749+ if (atomic_read(&hvcsd->port.count) > 0) {
46750 spin_unlock_irqrestore(&hvcsd->lock, flags);
46751 printk(KERN_INFO "HVCS: vterm state unchanged. "
46752 "The hvcs device node is still in use.\n");
46753@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
46754 }
46755 }
46756
46757- hvcsd->port.count = 0;
46758+ atomic_set(&hvcsd->port.count, 0);
46759 hvcsd->port.tty = tty;
46760 tty->driver_data = hvcsd;
46761
46762@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
46763 unsigned long flags;
46764
46765 spin_lock_irqsave(&hvcsd->lock, flags);
46766- hvcsd->port.count++;
46767+ atomic_inc(&hvcsd->port.count);
46768 hvcsd->todo_mask |= HVCS_SCHED_READ;
46769 spin_unlock_irqrestore(&hvcsd->lock, flags);
46770
46771@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
46772 hvcsd = tty->driver_data;
46773
46774 spin_lock_irqsave(&hvcsd->lock, flags);
46775- if (--hvcsd->port.count == 0) {
46776+ if (atomic_dec_and_test(&hvcsd->port.count)) {
46777
46778 vio_disable_interrupts(hvcsd->vdev);
46779
46780@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
46781
46782 free_irq(irq, hvcsd);
46783 return;
46784- } else if (hvcsd->port.count < 0) {
46785+ } else if (atomic_read(&hvcsd->port.count) < 0) {
46786 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
46787 " is missmanaged.\n",
46788- hvcsd->vdev->unit_address, hvcsd->port.count);
46789+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
46790 }
46791
46792 spin_unlock_irqrestore(&hvcsd->lock, flags);
46793@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
46794
46795 spin_lock_irqsave(&hvcsd->lock, flags);
46796 /* Preserve this so that we know how many kref refs to put */
46797- temp_open_count = hvcsd->port.count;
46798+ temp_open_count = atomic_read(&hvcsd->port.count);
46799
46800 /*
46801 * Don't kref put inside the spinlock because the destruction
46802@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
46803 tty->driver_data = NULL;
46804 hvcsd->port.tty = NULL;
46805
46806- hvcsd->port.count = 0;
46807+ atomic_set(&hvcsd->port.count, 0);
46808
46809 /* This will drop any buffered data on the floor which is OK in a hangup
46810 * scenario. */
46811@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
46812 * the middle of a write operation? This is a crummy place to do this
46813 * but we want to keep it all in the spinlock.
46814 */
46815- if (hvcsd->port.count <= 0) {
46816+ if (atomic_read(&hvcsd->port.count) <= 0) {
46817 spin_unlock_irqrestore(&hvcsd->lock, flags);
46818 return -ENODEV;
46819 }
46820@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
46821 {
46822 struct hvcs_struct *hvcsd = tty->driver_data;
46823
46824- if (!hvcsd || hvcsd->port.count <= 0)
46825+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
46826 return 0;
46827
46828 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
46829diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
46830index 8fd72ff..34a0bed 100644
46831--- a/drivers/tty/ipwireless/tty.c
46832+++ b/drivers/tty/ipwireless/tty.c
46833@@ -29,6 +29,7 @@
46834 #include <linux/tty_driver.h>
46835 #include <linux/tty_flip.h>
46836 #include <linux/uaccess.h>
46837+#include <asm/local.h>
46838
46839 #include "tty.h"
46840 #include "network.h"
46841@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
46842 mutex_unlock(&tty->ipw_tty_mutex);
46843 return -ENODEV;
46844 }
46845- if (tty->port.count == 0)
46846+ if (atomic_read(&tty->port.count) == 0)
46847 tty->tx_bytes_queued = 0;
46848
46849- tty->port.count++;
46850+ atomic_inc(&tty->port.count);
46851
46852 tty->port.tty = linux_tty;
46853 linux_tty->driver_data = tty;
46854@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
46855
46856 static void do_ipw_close(struct ipw_tty *tty)
46857 {
46858- tty->port.count--;
46859-
46860- if (tty->port.count == 0) {
46861+ if (atomic_dec_return(&tty->port.count) == 0) {
46862 struct tty_struct *linux_tty = tty->port.tty;
46863
46864 if (linux_tty != NULL) {
46865@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
46866 return;
46867
46868 mutex_lock(&tty->ipw_tty_mutex);
46869- if (tty->port.count == 0) {
46870+ if (atomic_read(&tty->port.count) == 0) {
46871 mutex_unlock(&tty->ipw_tty_mutex);
46872 return;
46873 }
46874@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
46875
46876 mutex_lock(&tty->ipw_tty_mutex);
46877
46878- if (!tty->port.count) {
46879+ if (!atomic_read(&tty->port.count)) {
46880 mutex_unlock(&tty->ipw_tty_mutex);
46881 return;
46882 }
46883@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
46884 return -ENODEV;
46885
46886 mutex_lock(&tty->ipw_tty_mutex);
46887- if (!tty->port.count) {
46888+ if (!atomic_read(&tty->port.count)) {
46889 mutex_unlock(&tty->ipw_tty_mutex);
46890 return -EINVAL;
46891 }
46892@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
46893 if (!tty)
46894 return -ENODEV;
46895
46896- if (!tty->port.count)
46897+ if (!atomic_read(&tty->port.count))
46898 return -EINVAL;
46899
46900 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
46901@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
46902 if (!tty)
46903 return 0;
46904
46905- if (!tty->port.count)
46906+ if (!atomic_read(&tty->port.count))
46907 return 0;
46908
46909 return tty->tx_bytes_queued;
46910@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
46911 if (!tty)
46912 return -ENODEV;
46913
46914- if (!tty->port.count)
46915+ if (!atomic_read(&tty->port.count))
46916 return -EINVAL;
46917
46918 return get_control_lines(tty);
46919@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
46920 if (!tty)
46921 return -ENODEV;
46922
46923- if (!tty->port.count)
46924+ if (!atomic_read(&tty->port.count))
46925 return -EINVAL;
46926
46927 return set_control_lines(tty, set, clear);
46928@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
46929 if (!tty)
46930 return -ENODEV;
46931
46932- if (!tty->port.count)
46933+ if (!atomic_read(&tty->port.count))
46934 return -EINVAL;
46935
46936 /* FIXME: Exactly how is the tty object locked here .. */
46937@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
46938 * are gone */
46939 mutex_lock(&ttyj->ipw_tty_mutex);
46940 }
46941- while (ttyj->port.count)
46942+ while (atomic_read(&ttyj->port.count))
46943 do_ipw_close(ttyj);
46944 ipwireless_disassociate_network_ttys(network,
46945 ttyj->channel_idx);
46946diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
46947index 1deaca4..c8582d4 100644
46948--- a/drivers/tty/moxa.c
46949+++ b/drivers/tty/moxa.c
46950@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
46951 }
46952
46953 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
46954- ch->port.count++;
46955+ atomic_inc(&ch->port.count);
46956 tty->driver_data = ch;
46957 tty_port_tty_set(&ch->port, tty);
46958 mutex_lock(&ch->port.mutex);
46959diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
46960index 6422390..49003ac8 100644
46961--- a/drivers/tty/n_gsm.c
46962+++ b/drivers/tty/n_gsm.c
46963@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
46964 spin_lock_init(&dlci->lock);
46965 mutex_init(&dlci->mutex);
46966 dlci->fifo = &dlci->_fifo;
46967- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
46968+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
46969 kfree(dlci);
46970 return NULL;
46971 }
46972@@ -2932,7 +2932,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
46973 struct gsm_dlci *dlci = tty->driver_data;
46974 struct tty_port *port = &dlci->port;
46975
46976- port->count++;
46977+ atomic_inc(&port->count);
46978 dlci_get(dlci);
46979 dlci_get(dlci->gsm->dlci[0]);
46980 mux_get(dlci->gsm);
46981diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
46982index 6c7fe90..9241dab 100644
46983--- a/drivers/tty/n_tty.c
46984+++ b/drivers/tty/n_tty.c
46985@@ -2203,6 +2203,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
46986 {
46987 *ops = tty_ldisc_N_TTY;
46988 ops->owner = NULL;
46989- ops->refcount = ops->flags = 0;
46990+ atomic_set(&ops->refcount, 0);
46991+ ops->flags = 0;
46992 }
46993 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
46994diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
46995index abfd990..5ab5da9 100644
46996--- a/drivers/tty/pty.c
46997+++ b/drivers/tty/pty.c
46998@@ -796,8 +796,10 @@ static void __init unix98_pty_init(void)
46999 panic("Couldn't register Unix98 pts driver");
47000
47001 /* Now create the /dev/ptmx special device */
47002+ pax_open_kernel();
47003 tty_default_fops(&ptmx_fops);
47004- ptmx_fops.open = ptmx_open;
47005+ *(void **)&ptmx_fops.open = ptmx_open;
47006+ pax_close_kernel();
47007
47008 cdev_init(&ptmx_cdev, &ptmx_fops);
47009 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
47010diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
47011index 354564e..fe50d9a 100644
47012--- a/drivers/tty/rocket.c
47013+++ b/drivers/tty/rocket.c
47014@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
47015 tty->driver_data = info;
47016 tty_port_tty_set(port, tty);
47017
47018- if (port->count++ == 0) {
47019+ if (atomic_inc_return(&port->count) == 1) {
47020 atomic_inc(&rp_num_ports_open);
47021
47022 #ifdef ROCKET_DEBUG_OPEN
47023@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
47024 #endif
47025 }
47026 #ifdef ROCKET_DEBUG_OPEN
47027- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
47028+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
47029 #endif
47030
47031 /*
47032@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
47033 spin_unlock_irqrestore(&info->port.lock, flags);
47034 return;
47035 }
47036- if (info->port.count)
47037+ if (atomic_read(&info->port.count))
47038 atomic_dec(&rp_num_ports_open);
47039 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
47040 spin_unlock_irqrestore(&info->port.lock, flags);
47041diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
47042index 1002054..dd644a8 100644
47043--- a/drivers/tty/serial/kgdboc.c
47044+++ b/drivers/tty/serial/kgdboc.c
47045@@ -24,8 +24,9 @@
47046 #define MAX_CONFIG_LEN 40
47047
47048 static struct kgdb_io kgdboc_io_ops;
47049+static struct kgdb_io kgdboc_io_ops_console;
47050
47051-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
47052+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
47053 static int configured = -1;
47054
47055 static char config[MAX_CONFIG_LEN];
47056@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
47057 kgdboc_unregister_kbd();
47058 if (configured == 1)
47059 kgdb_unregister_io_module(&kgdboc_io_ops);
47060+ else if (configured == 2)
47061+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
47062 }
47063
47064 static int configure_kgdboc(void)
47065@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
47066 int err;
47067 char *cptr = config;
47068 struct console *cons;
47069+ int is_console = 0;
47070
47071 err = kgdboc_option_setup(config);
47072 if (err || !strlen(config) || isspace(config[0]))
47073 goto noconfig;
47074
47075 err = -ENODEV;
47076- kgdboc_io_ops.is_console = 0;
47077 kgdb_tty_driver = NULL;
47078
47079 kgdboc_use_kms = 0;
47080@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
47081 int idx;
47082 if (cons->device && cons->device(cons, &idx) == p &&
47083 idx == tty_line) {
47084- kgdboc_io_ops.is_console = 1;
47085+ is_console = 1;
47086 break;
47087 }
47088 cons = cons->next;
47089@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
47090 kgdb_tty_line = tty_line;
47091
47092 do_register:
47093- err = kgdb_register_io_module(&kgdboc_io_ops);
47094+ if (is_console) {
47095+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
47096+ configured = 2;
47097+ } else {
47098+ err = kgdb_register_io_module(&kgdboc_io_ops);
47099+ configured = 1;
47100+ }
47101 if (err)
47102 goto noconfig;
47103
47104@@ -205,8 +214,6 @@ do_register:
47105 if (err)
47106 goto nmi_con_failed;
47107
47108- configured = 1;
47109-
47110 return 0;
47111
47112 nmi_con_failed:
47113@@ -223,7 +230,7 @@ noconfig:
47114 static int __init init_kgdboc(void)
47115 {
47116 /* Already configured? */
47117- if (configured == 1)
47118+ if (configured >= 1)
47119 return 0;
47120
47121 return configure_kgdboc();
47122@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
47123 if (config[len - 1] == '\n')
47124 config[len - 1] = '\0';
47125
47126- if (configured == 1)
47127+ if (configured >= 1)
47128 cleanup_kgdboc();
47129
47130 /* Go and configure with the new params. */
47131@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
47132 .post_exception = kgdboc_post_exp_handler,
47133 };
47134
47135+static struct kgdb_io kgdboc_io_ops_console = {
47136+ .name = "kgdboc",
47137+ .read_char = kgdboc_get_char,
47138+ .write_char = kgdboc_put_char,
47139+ .pre_exception = kgdboc_pre_exp_handler,
47140+ .post_exception = kgdboc_post_exp_handler,
47141+ .is_console = 1
47142+};
47143+
47144 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
47145 /* This is only available if kgdboc is a built in for early debugging */
47146 static int __init kgdboc_early_init(char *opt)
47147diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
47148index 0c8a9fa..234a95f 100644
47149--- a/drivers/tty/serial/samsung.c
47150+++ b/drivers/tty/serial/samsung.c
47151@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
47152 }
47153 }
47154
47155+static int s3c64xx_serial_startup(struct uart_port *port);
47156 static int s3c24xx_serial_startup(struct uart_port *port)
47157 {
47158 struct s3c24xx_uart_port *ourport = to_ourport(port);
47159 int ret;
47160
47161+ /* Startup sequence is different for s3c64xx and higher SoC's */
47162+ if (s3c24xx_serial_has_interrupt_mask(port))
47163+ return s3c64xx_serial_startup(port);
47164+
47165 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
47166 port->mapbase, port->membase);
47167
47168@@ -1124,10 +1129,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
47169 /* setup info for port */
47170 port->dev = &platdev->dev;
47171
47172- /* Startup sequence is different for s3c64xx and higher SoC's */
47173- if (s3c24xx_serial_has_interrupt_mask(port))
47174- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
47175-
47176 port->uartclk = 1;
47177
47178 if (cfg->uart_flags & UPF_CONS_FLOW) {
47179diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
47180index f87dbfd..42ad4b1 100644
47181--- a/drivers/tty/serial/serial_core.c
47182+++ b/drivers/tty/serial/serial_core.c
47183@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
47184 uart_flush_buffer(tty);
47185 uart_shutdown(tty, state);
47186 spin_lock_irqsave(&port->lock, flags);
47187- port->count = 0;
47188+ atomic_set(&port->count, 0);
47189 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
47190 spin_unlock_irqrestore(&port->lock, flags);
47191 tty_port_tty_set(port, NULL);
47192@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47193 goto end;
47194 }
47195
47196- port->count++;
47197+ atomic_inc(&port->count);
47198 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
47199 retval = -ENXIO;
47200 goto err_dec_count;
47201@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47202 /*
47203 * Make sure the device is in D0 state.
47204 */
47205- if (port->count == 1)
47206+ if (atomic_read(&port->count) == 1)
47207 uart_change_pm(state, UART_PM_STATE_ON);
47208
47209 /*
47210@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47211 end:
47212 return retval;
47213 err_dec_count:
47214- port->count--;
47215+ atomic_inc(&port->count);
47216 mutex_unlock(&port->mutex);
47217 goto end;
47218 }
47219diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
47220index 8eaf1ab..85c030d 100644
47221--- a/drivers/tty/synclink.c
47222+++ b/drivers/tty/synclink.c
47223@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
47224
47225 if (debug_level >= DEBUG_LEVEL_INFO)
47226 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
47227- __FILE__,__LINE__, info->device_name, info->port.count);
47228+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
47229
47230 if (tty_port_close_start(&info->port, tty, filp) == 0)
47231 goto cleanup;
47232@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
47233 cleanup:
47234 if (debug_level >= DEBUG_LEVEL_INFO)
47235 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
47236- tty->driver->name, info->port.count);
47237+ tty->driver->name, atomic_read(&info->port.count));
47238
47239 } /* end of mgsl_close() */
47240
47241@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
47242
47243 mgsl_flush_buffer(tty);
47244 shutdown(info);
47245-
47246- info->port.count = 0;
47247+
47248+ atomic_set(&info->port.count, 0);
47249 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47250 info->port.tty = NULL;
47251
47252@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47253
47254 if (debug_level >= DEBUG_LEVEL_INFO)
47255 printk("%s(%d):block_til_ready before block on %s count=%d\n",
47256- __FILE__,__LINE__, tty->driver->name, port->count );
47257+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47258
47259 spin_lock_irqsave(&info->irq_spinlock, flags);
47260 if (!tty_hung_up_p(filp)) {
47261 extra_count = true;
47262- port->count--;
47263+ atomic_dec(&port->count);
47264 }
47265 spin_unlock_irqrestore(&info->irq_spinlock, flags);
47266 port->blocked_open++;
47267@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47268
47269 if (debug_level >= DEBUG_LEVEL_INFO)
47270 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
47271- __FILE__,__LINE__, tty->driver->name, port->count );
47272+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47273
47274 tty_unlock(tty);
47275 schedule();
47276@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47277
47278 /* FIXME: Racy on hangup during close wait */
47279 if (extra_count)
47280- port->count++;
47281+ atomic_inc(&port->count);
47282 port->blocked_open--;
47283
47284 if (debug_level >= DEBUG_LEVEL_INFO)
47285 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
47286- __FILE__,__LINE__, tty->driver->name, port->count );
47287+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47288
47289 if (!retval)
47290 port->flags |= ASYNC_NORMAL_ACTIVE;
47291@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
47292
47293 if (debug_level >= DEBUG_LEVEL_INFO)
47294 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
47295- __FILE__,__LINE__,tty->driver->name, info->port.count);
47296+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
47297
47298 /* If port is closing, signal caller to try again */
47299 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47300@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
47301 spin_unlock_irqrestore(&info->netlock, flags);
47302 goto cleanup;
47303 }
47304- info->port.count++;
47305+ atomic_inc(&info->port.count);
47306 spin_unlock_irqrestore(&info->netlock, flags);
47307
47308- if (info->port.count == 1) {
47309+ if (atomic_read(&info->port.count) == 1) {
47310 /* 1st open on this device, init hardware */
47311 retval = startup(info);
47312 if (retval < 0)
47313@@ -3446,8 +3446,8 @@ cleanup:
47314 if (retval) {
47315 if (tty->count == 1)
47316 info->port.tty = NULL; /* tty layer will release tty struct */
47317- if(info->port.count)
47318- info->port.count--;
47319+ if (atomic_read(&info->port.count))
47320+ atomic_dec(&info->port.count);
47321 }
47322
47323 return retval;
47324@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47325 unsigned short new_crctype;
47326
47327 /* return error if TTY interface open */
47328- if (info->port.count)
47329+ if (atomic_read(&info->port.count))
47330 return -EBUSY;
47331
47332 switch (encoding)
47333@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
47334
47335 /* arbitrate between network and tty opens */
47336 spin_lock_irqsave(&info->netlock, flags);
47337- if (info->port.count != 0 || info->netcount != 0) {
47338+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47339 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
47340 spin_unlock_irqrestore(&info->netlock, flags);
47341 return -EBUSY;
47342@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47343 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
47344
47345 /* return error if TTY interface open */
47346- if (info->port.count)
47347+ if (atomic_read(&info->port.count))
47348 return -EBUSY;
47349
47350 if (cmd != SIOCWANDEV)
47351diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
47352index 1abf946..1ee34fc 100644
47353--- a/drivers/tty/synclink_gt.c
47354+++ b/drivers/tty/synclink_gt.c
47355@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
47356 tty->driver_data = info;
47357 info->port.tty = tty;
47358
47359- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
47360+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
47361
47362 /* If port is closing, signal caller to try again */
47363 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47364@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
47365 mutex_unlock(&info->port.mutex);
47366 goto cleanup;
47367 }
47368- info->port.count++;
47369+ atomic_inc(&info->port.count);
47370 spin_unlock_irqrestore(&info->netlock, flags);
47371
47372- if (info->port.count == 1) {
47373+ if (atomic_read(&info->port.count) == 1) {
47374 /* 1st open on this device, init hardware */
47375 retval = startup(info);
47376 if (retval < 0) {
47377@@ -715,8 +715,8 @@ cleanup:
47378 if (retval) {
47379 if (tty->count == 1)
47380 info->port.tty = NULL; /* tty layer will release tty struct */
47381- if(info->port.count)
47382- info->port.count--;
47383+ if(atomic_read(&info->port.count))
47384+ atomic_dec(&info->port.count);
47385 }
47386
47387 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
47388@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47389
47390 if (sanity_check(info, tty->name, "close"))
47391 return;
47392- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
47393+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
47394
47395 if (tty_port_close_start(&info->port, tty, filp) == 0)
47396 goto cleanup;
47397@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47398 tty_port_close_end(&info->port, tty);
47399 info->port.tty = NULL;
47400 cleanup:
47401- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
47402+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
47403 }
47404
47405 static void hangup(struct tty_struct *tty)
47406@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
47407 shutdown(info);
47408
47409 spin_lock_irqsave(&info->port.lock, flags);
47410- info->port.count = 0;
47411+ atomic_set(&info->port.count, 0);
47412 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47413 info->port.tty = NULL;
47414 spin_unlock_irqrestore(&info->port.lock, flags);
47415@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47416 unsigned short new_crctype;
47417
47418 /* return error if TTY interface open */
47419- if (info->port.count)
47420+ if (atomic_read(&info->port.count))
47421 return -EBUSY;
47422
47423 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
47424@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
47425
47426 /* arbitrate between network and tty opens */
47427 spin_lock_irqsave(&info->netlock, flags);
47428- if (info->port.count != 0 || info->netcount != 0) {
47429+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47430 DBGINFO(("%s hdlc_open busy\n", dev->name));
47431 spin_unlock_irqrestore(&info->netlock, flags);
47432 return -EBUSY;
47433@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47434 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
47435
47436 /* return error if TTY interface open */
47437- if (info->port.count)
47438+ if (atomic_read(&info->port.count))
47439 return -EBUSY;
47440
47441 if (cmd != SIOCWANDEV)
47442@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
47443 if (port == NULL)
47444 continue;
47445 spin_lock(&port->lock);
47446- if ((port->port.count || port->netcount) &&
47447+ if ((atomic_read(&port->port.count) || port->netcount) &&
47448 port->pending_bh && !port->bh_running &&
47449 !port->bh_requested) {
47450 DBGISR(("%s bh queued\n", port->device_name));
47451@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47452 spin_lock_irqsave(&info->lock, flags);
47453 if (!tty_hung_up_p(filp)) {
47454 extra_count = true;
47455- port->count--;
47456+ atomic_dec(&port->count);
47457 }
47458 spin_unlock_irqrestore(&info->lock, flags);
47459 port->blocked_open++;
47460@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47461 remove_wait_queue(&port->open_wait, &wait);
47462
47463 if (extra_count)
47464- port->count++;
47465+ atomic_inc(&port->count);
47466 port->blocked_open--;
47467
47468 if (!retval)
47469diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
47470index ff17138..e38b41e 100644
47471--- a/drivers/tty/synclinkmp.c
47472+++ b/drivers/tty/synclinkmp.c
47473@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
47474
47475 if (debug_level >= DEBUG_LEVEL_INFO)
47476 printk("%s(%d):%s open(), old ref count = %d\n",
47477- __FILE__,__LINE__,tty->driver->name, info->port.count);
47478+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
47479
47480 /* If port is closing, signal caller to try again */
47481 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47482@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
47483 spin_unlock_irqrestore(&info->netlock, flags);
47484 goto cleanup;
47485 }
47486- info->port.count++;
47487+ atomic_inc(&info->port.count);
47488 spin_unlock_irqrestore(&info->netlock, flags);
47489
47490- if (info->port.count == 1) {
47491+ if (atomic_read(&info->port.count) == 1) {
47492 /* 1st open on this device, init hardware */
47493 retval = startup(info);
47494 if (retval < 0)
47495@@ -796,8 +796,8 @@ cleanup:
47496 if (retval) {
47497 if (tty->count == 1)
47498 info->port.tty = NULL; /* tty layer will release tty struct */
47499- if(info->port.count)
47500- info->port.count--;
47501+ if(atomic_read(&info->port.count))
47502+ atomic_dec(&info->port.count);
47503 }
47504
47505 return retval;
47506@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47507
47508 if (debug_level >= DEBUG_LEVEL_INFO)
47509 printk("%s(%d):%s close() entry, count=%d\n",
47510- __FILE__,__LINE__, info->device_name, info->port.count);
47511+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
47512
47513 if (tty_port_close_start(&info->port, tty, filp) == 0)
47514 goto cleanup;
47515@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47516 cleanup:
47517 if (debug_level >= DEBUG_LEVEL_INFO)
47518 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
47519- tty->driver->name, info->port.count);
47520+ tty->driver->name, atomic_read(&info->port.count));
47521 }
47522
47523 /* Called by tty_hangup() when a hangup is signaled.
47524@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
47525 shutdown(info);
47526
47527 spin_lock_irqsave(&info->port.lock, flags);
47528- info->port.count = 0;
47529+ atomic_set(&info->port.count, 0);
47530 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47531 info->port.tty = NULL;
47532 spin_unlock_irqrestore(&info->port.lock, flags);
47533@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47534 unsigned short new_crctype;
47535
47536 /* return error if TTY interface open */
47537- if (info->port.count)
47538+ if (atomic_read(&info->port.count))
47539 return -EBUSY;
47540
47541 switch (encoding)
47542@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
47543
47544 /* arbitrate between network and tty opens */
47545 spin_lock_irqsave(&info->netlock, flags);
47546- if (info->port.count != 0 || info->netcount != 0) {
47547+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47548 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
47549 spin_unlock_irqrestore(&info->netlock, flags);
47550 return -EBUSY;
47551@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47552 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
47553
47554 /* return error if TTY interface open */
47555- if (info->port.count)
47556+ if (atomic_read(&info->port.count))
47557 return -EBUSY;
47558
47559 if (cmd != SIOCWANDEV)
47560@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
47561 * do not request bottom half processing if the
47562 * device is not open in a normal mode.
47563 */
47564- if ( port && (port->port.count || port->netcount) &&
47565+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
47566 port->pending_bh && !port->bh_running &&
47567 !port->bh_requested ) {
47568 if ( debug_level >= DEBUG_LEVEL_ISR )
47569@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47570
47571 if (debug_level >= DEBUG_LEVEL_INFO)
47572 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
47573- __FILE__,__LINE__, tty->driver->name, port->count );
47574+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47575
47576 spin_lock_irqsave(&info->lock, flags);
47577 if (!tty_hung_up_p(filp)) {
47578 extra_count = true;
47579- port->count--;
47580+ atomic_dec(&port->count);
47581 }
47582 spin_unlock_irqrestore(&info->lock, flags);
47583 port->blocked_open++;
47584@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47585
47586 if (debug_level >= DEBUG_LEVEL_INFO)
47587 printk("%s(%d):%s block_til_ready() count=%d\n",
47588- __FILE__,__LINE__, tty->driver->name, port->count );
47589+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47590
47591 tty_unlock(tty);
47592 schedule();
47593@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47594 remove_wait_queue(&port->open_wait, &wait);
47595
47596 if (extra_count)
47597- port->count++;
47598+ atomic_inc(&port->count);
47599 port->blocked_open--;
47600
47601 if (debug_level >= DEBUG_LEVEL_INFO)
47602 printk("%s(%d):%s block_til_ready() after, count=%d\n",
47603- __FILE__,__LINE__, tty->driver->name, port->count );
47604+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47605
47606 if (!retval)
47607 port->flags |= ASYNC_NORMAL_ACTIVE;
47608diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
47609index b51c154..17d55d1 100644
47610--- a/drivers/tty/sysrq.c
47611+++ b/drivers/tty/sysrq.c
47612@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
47613 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
47614 size_t count, loff_t *ppos)
47615 {
47616- if (count) {
47617+ if (count && capable(CAP_SYS_ADMIN)) {
47618 char c;
47619
47620 if (get_user(c, buf))
47621diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
47622index 4476682..d77e748 100644
47623--- a/drivers/tty/tty_io.c
47624+++ b/drivers/tty/tty_io.c
47625@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
47626
47627 void tty_default_fops(struct file_operations *fops)
47628 {
47629- *fops = tty_fops;
47630+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
47631 }
47632
47633 /*
47634diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
47635index 1afe192..73d2c20 100644
47636--- a/drivers/tty/tty_ldisc.c
47637+++ b/drivers/tty/tty_ldisc.c
47638@@ -66,7 +66,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
47639 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47640 tty_ldiscs[disc] = new_ldisc;
47641 new_ldisc->num = disc;
47642- new_ldisc->refcount = 0;
47643+ atomic_set(&new_ldisc->refcount, 0);
47644 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47645
47646 return ret;
47647@@ -94,7 +94,7 @@ int tty_unregister_ldisc(int disc)
47648 return -EINVAL;
47649
47650 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47651- if (tty_ldiscs[disc]->refcount)
47652+ if (atomic_read(&tty_ldiscs[disc]->refcount))
47653 ret = -EBUSY;
47654 else
47655 tty_ldiscs[disc] = NULL;
47656@@ -115,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
47657 if (ldops) {
47658 ret = ERR_PTR(-EAGAIN);
47659 if (try_module_get(ldops->owner)) {
47660- ldops->refcount++;
47661+ atomic_inc(&ldops->refcount);
47662 ret = ldops;
47663 }
47664 }
47665@@ -128,7 +128,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
47666 unsigned long flags;
47667
47668 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47669- ldops->refcount--;
47670+ atomic_dec(&ldops->refcount);
47671 module_put(ldops->owner);
47672 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47673 }
47674@@ -196,7 +196,7 @@ static inline void tty_ldisc_put(struct tty_ldisc *ld)
47675 /* unreleased reader reference(s) will cause this WARN */
47676 WARN_ON(!atomic_dec_and_test(&ld->users));
47677
47678- ld->ops->refcount--;
47679+ atomic_dec(&ld->ops->refcount);
47680 module_put(ld->ops->owner);
47681 kfree(ld);
47682 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47683diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
47684index f597e88..b7f68ed 100644
47685--- a/drivers/tty/tty_port.c
47686+++ b/drivers/tty/tty_port.c
47687@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
47688 unsigned long flags;
47689
47690 spin_lock_irqsave(&port->lock, flags);
47691- port->count = 0;
47692+ atomic_set(&port->count, 0);
47693 port->flags &= ~ASYNC_NORMAL_ACTIVE;
47694 tty = port->tty;
47695 if (tty)
47696@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
47697 /* The port lock protects the port counts */
47698 spin_lock_irqsave(&port->lock, flags);
47699 if (!tty_hung_up_p(filp))
47700- port->count--;
47701+ atomic_dec(&port->count);
47702 port->blocked_open++;
47703 spin_unlock_irqrestore(&port->lock, flags);
47704
47705@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
47706 we must not mess that up further */
47707 spin_lock_irqsave(&port->lock, flags);
47708 if (!tty_hung_up_p(filp))
47709- port->count++;
47710+ atomic_inc(&port->count);
47711 port->blocked_open--;
47712 if (retval == 0)
47713 port->flags |= ASYNC_NORMAL_ACTIVE;
47714@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
47715 return 0;
47716 }
47717
47718- if (tty->count == 1 && port->count != 1) {
47719+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
47720 printk(KERN_WARNING
47721 "tty_port_close_start: tty->count = 1 port count = %d.\n",
47722- port->count);
47723- port->count = 1;
47724+ atomic_read(&port->count));
47725+ atomic_set(&port->count, 1);
47726 }
47727- if (--port->count < 0) {
47728+ if (atomic_dec_return(&port->count) < 0) {
47729 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
47730- port->count);
47731- port->count = 0;
47732+ atomic_read(&port->count));
47733+ atomic_set(&port->count, 0);
47734 }
47735
47736- if (port->count) {
47737+ if (atomic_read(&port->count)) {
47738 spin_unlock_irqrestore(&port->lock, flags);
47739 if (port->ops->drop)
47740 port->ops->drop(port);
47741@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
47742 {
47743 spin_lock_irq(&port->lock);
47744 if (!tty_hung_up_p(filp))
47745- ++port->count;
47746+ atomic_inc(&port->count);
47747 spin_unlock_irq(&port->lock);
47748 tty_port_tty_set(port, tty);
47749
47750diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
47751index a9af1b9a..1e08e7f 100644
47752--- a/drivers/tty/vt/keyboard.c
47753+++ b/drivers/tty/vt/keyboard.c
47754@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
47755 kbd->kbdmode == VC_OFF) &&
47756 value != KVAL(K_SAK))
47757 return; /* SAK is allowed even in raw mode */
47758+
47759+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47760+ {
47761+ void *func = fn_handler[value];
47762+ if (func == fn_show_state || func == fn_show_ptregs ||
47763+ func == fn_show_mem)
47764+ return;
47765+ }
47766+#endif
47767+
47768 fn_handler[value](vc);
47769 }
47770
47771@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
47772 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
47773 return -EFAULT;
47774
47775- if (!capable(CAP_SYS_TTY_CONFIG))
47776- perm = 0;
47777-
47778 switch (cmd) {
47779 case KDGKBENT:
47780 /* Ensure another thread doesn't free it under us */
47781@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
47782 spin_unlock_irqrestore(&kbd_event_lock, flags);
47783 return put_user(val, &user_kbe->kb_value);
47784 case KDSKBENT:
47785+ if (!capable(CAP_SYS_TTY_CONFIG))
47786+ perm = 0;
47787+
47788 if (!perm)
47789 return -EPERM;
47790 if (!i && v == K_NOSUCHMAP) {
47791@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
47792 int i, j, k;
47793 int ret;
47794
47795- if (!capable(CAP_SYS_TTY_CONFIG))
47796- perm = 0;
47797-
47798 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
47799 if (!kbs) {
47800 ret = -ENOMEM;
47801@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
47802 kfree(kbs);
47803 return ((p && *p) ? -EOVERFLOW : 0);
47804 case KDSKBSENT:
47805+ if (!capable(CAP_SYS_TTY_CONFIG))
47806+ perm = 0;
47807+
47808 if (!perm) {
47809 ret = -EPERM;
47810 goto reterr;
47811diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
47812index b645c47..a55c182 100644
47813--- a/drivers/uio/uio.c
47814+++ b/drivers/uio/uio.c
47815@@ -25,6 +25,7 @@
47816 #include <linux/kobject.h>
47817 #include <linux/cdev.h>
47818 #include <linux/uio_driver.h>
47819+#include <asm/local.h>
47820
47821 #define UIO_MAX_DEVICES (1U << MINORBITS)
47822
47823@@ -32,10 +33,10 @@ struct uio_device {
47824 struct module *owner;
47825 struct device *dev;
47826 int minor;
47827- atomic_t event;
47828+ atomic_unchecked_t event;
47829 struct fasync_struct *async_queue;
47830 wait_queue_head_t wait;
47831- int vma_count;
47832+ local_t vma_count;
47833 struct uio_info *info;
47834 struct kobject *map_dir;
47835 struct kobject *portio_dir;
47836@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
47837 struct device_attribute *attr, char *buf)
47838 {
47839 struct uio_device *idev = dev_get_drvdata(dev);
47840- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
47841+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
47842 }
47843
47844 static struct device_attribute uio_class_attributes[] = {
47845@@ -398,7 +399,7 @@ void uio_event_notify(struct uio_info *info)
47846 {
47847 struct uio_device *idev = info->uio_dev;
47848
47849- atomic_inc(&idev->event);
47850+ atomic_inc_unchecked(&idev->event);
47851 wake_up_interruptible(&idev->wait);
47852 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
47853 }
47854@@ -451,7 +452,7 @@ static int uio_open(struct inode *inode, struct file *filep)
47855 }
47856
47857 listener->dev = idev;
47858- listener->event_count = atomic_read(&idev->event);
47859+ listener->event_count = atomic_read_unchecked(&idev->event);
47860 filep->private_data = listener;
47861
47862 if (idev->info->open) {
47863@@ -502,7 +503,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
47864 return -EIO;
47865
47866 poll_wait(filep, &idev->wait, wait);
47867- if (listener->event_count != atomic_read(&idev->event))
47868+ if (listener->event_count != atomic_read_unchecked(&idev->event))
47869 return POLLIN | POLLRDNORM;
47870 return 0;
47871 }
47872@@ -527,7 +528,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
47873 do {
47874 set_current_state(TASK_INTERRUPTIBLE);
47875
47876- event_count = atomic_read(&idev->event);
47877+ event_count = atomic_read_unchecked(&idev->event);
47878 if (event_count != listener->event_count) {
47879 if (copy_to_user(buf, &event_count, count))
47880 retval = -EFAULT;
47881@@ -596,13 +597,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
47882 static void uio_vma_open(struct vm_area_struct *vma)
47883 {
47884 struct uio_device *idev = vma->vm_private_data;
47885- idev->vma_count++;
47886+ local_inc(&idev->vma_count);
47887 }
47888
47889 static void uio_vma_close(struct vm_area_struct *vma)
47890 {
47891 struct uio_device *idev = vma->vm_private_data;
47892- idev->vma_count--;
47893+ local_dec(&idev->vma_count);
47894 }
47895
47896 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
47897@@ -809,7 +810,7 @@ int __uio_register_device(struct module *owner,
47898 idev->owner = owner;
47899 idev->info = info;
47900 init_waitqueue_head(&idev->wait);
47901- atomic_set(&idev->event, 0);
47902+ atomic_set_unchecked(&idev->event, 0);
47903
47904 ret = uio_get_minor(idev);
47905 if (ret)
47906diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
47907index 8a7eb77..c00402f 100644
47908--- a/drivers/usb/atm/cxacru.c
47909+++ b/drivers/usb/atm/cxacru.c
47910@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
47911 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
47912 if (ret < 2)
47913 return -EINVAL;
47914- if (index < 0 || index > 0x7f)
47915+ if (index > 0x7f)
47916 return -EINVAL;
47917 pos += tmp;
47918
47919diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
47920index d3527dd..26effa2 100644
47921--- a/drivers/usb/atm/usbatm.c
47922+++ b/drivers/usb/atm/usbatm.c
47923@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47924 if (printk_ratelimit())
47925 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
47926 __func__, vpi, vci);
47927- atomic_inc(&vcc->stats->rx_err);
47928+ atomic_inc_unchecked(&vcc->stats->rx_err);
47929 return;
47930 }
47931
47932@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47933 if (length > ATM_MAX_AAL5_PDU) {
47934 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
47935 __func__, length, vcc);
47936- atomic_inc(&vcc->stats->rx_err);
47937+ atomic_inc_unchecked(&vcc->stats->rx_err);
47938 goto out;
47939 }
47940
47941@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47942 if (sarb->len < pdu_length) {
47943 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
47944 __func__, pdu_length, sarb->len, vcc);
47945- atomic_inc(&vcc->stats->rx_err);
47946+ atomic_inc_unchecked(&vcc->stats->rx_err);
47947 goto out;
47948 }
47949
47950 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
47951 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
47952 __func__, vcc);
47953- atomic_inc(&vcc->stats->rx_err);
47954+ atomic_inc_unchecked(&vcc->stats->rx_err);
47955 goto out;
47956 }
47957
47958@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47959 if (printk_ratelimit())
47960 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
47961 __func__, length);
47962- atomic_inc(&vcc->stats->rx_drop);
47963+ atomic_inc_unchecked(&vcc->stats->rx_drop);
47964 goto out;
47965 }
47966
47967@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47968
47969 vcc->push(vcc, skb);
47970
47971- atomic_inc(&vcc->stats->rx);
47972+ atomic_inc_unchecked(&vcc->stats->rx);
47973 out:
47974 skb_trim(sarb, 0);
47975 }
47976@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
47977 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
47978
47979 usbatm_pop(vcc, skb);
47980- atomic_inc(&vcc->stats->tx);
47981+ atomic_inc_unchecked(&vcc->stats->tx);
47982
47983 skb = skb_dequeue(&instance->sndqueue);
47984 }
47985@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
47986 if (!left--)
47987 return sprintf(page,
47988 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
47989- atomic_read(&atm_dev->stats.aal5.tx),
47990- atomic_read(&atm_dev->stats.aal5.tx_err),
47991- atomic_read(&atm_dev->stats.aal5.rx),
47992- atomic_read(&atm_dev->stats.aal5.rx_err),
47993- atomic_read(&atm_dev->stats.aal5.rx_drop));
47994+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
47995+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
47996+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
47997+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
47998+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
47999
48000 if (!left--) {
48001 if (instance->disconnected)
48002diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
48003index 2a3bbdf..91d72cf 100644
48004--- a/drivers/usb/core/devices.c
48005+++ b/drivers/usb/core/devices.c
48006@@ -126,7 +126,7 @@ static const char format_endpt[] =
48007 * time it gets called.
48008 */
48009 static struct device_connect_event {
48010- atomic_t count;
48011+ atomic_unchecked_t count;
48012 wait_queue_head_t wait;
48013 } device_event = {
48014 .count = ATOMIC_INIT(1),
48015@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
48016
48017 void usbfs_conn_disc_event(void)
48018 {
48019- atomic_add(2, &device_event.count);
48020+ atomic_add_unchecked(2, &device_event.count);
48021 wake_up(&device_event.wait);
48022 }
48023
48024@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
48025
48026 poll_wait(file, &device_event.wait, wait);
48027
48028- event_count = atomic_read(&device_event.count);
48029+ event_count = atomic_read_unchecked(&device_event.count);
48030 if (file->f_version != event_count) {
48031 file->f_version = event_count;
48032 return POLLIN | POLLRDNORM;
48033diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
48034index d53547d..6a22d02 100644
48035--- a/drivers/usb/core/hcd.c
48036+++ b/drivers/usb/core/hcd.c
48037@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
48038 */
48039 usb_get_urb(urb);
48040 atomic_inc(&urb->use_count);
48041- atomic_inc(&urb->dev->urbnum);
48042+ atomic_inc_unchecked(&urb->dev->urbnum);
48043 usbmon_urb_submit(&hcd->self, urb);
48044
48045 /* NOTE requirements on root-hub callers (usbfs and the hub
48046@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
48047 urb->hcpriv = NULL;
48048 INIT_LIST_HEAD(&urb->urb_list);
48049 atomic_dec(&urb->use_count);
48050- atomic_dec(&urb->dev->urbnum);
48051+ atomic_dec_unchecked(&urb->dev->urbnum);
48052 if (atomic_read(&urb->reject))
48053 wake_up(&usb_kill_urb_queue);
48054 usb_put_urb(urb);
e2b79cd1
AF
48055diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
48056index da2905a..834a569 100644
48057--- a/drivers/usb/core/hub.c
48058+++ b/drivers/usb/core/hub.c
48059@@ -27,6 +27,7 @@
48060 #include <linux/freezer.h>
48061 #include <linux/random.h>
48062 #include <linux/pm_qos.h>
48063+#include <linux/grsecurity.h>
48064
48065 #include <asm/uaccess.h>
48066 #include <asm/byteorder.h>
48067@@ -4424,6 +4425,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
48068 goto done;
48069 return;
48070 }
48071+
48072+ if (gr_handle_new_usb())
48073+ goto done;
48074+
48075 if (hub_is_superspeed(hub->hdev))
48076 unit_load = 150;
48077 else
bb5f0bf8
AF
48078diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
48079index 444d30e..f15c850 100644
48080--- a/drivers/usb/core/message.c
48081+++ b/drivers/usb/core/message.c
48082@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
48083 * method can wait for it to complete. Since you don't have a handle on the
48084 * URB used, you can't cancel the request.
48085 */
48086-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
48087+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
48088 __u8 requesttype, __u16 value, __u16 index, void *data,
48089 __u16 size, int timeout)
48090 {
48091diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
48092index aa38db4..0a08682 100644
48093--- a/drivers/usb/core/sysfs.c
48094+++ b/drivers/usb/core/sysfs.c
48095@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
48096 struct usb_device *udev;
48097
48098 udev = to_usb_device(dev);
48099- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
48100+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
48101 }
48102 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
48103
48104diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
48105index b10da72..43aa0b2 100644
48106--- a/drivers/usb/core/usb.c
48107+++ b/drivers/usb/core/usb.c
48108@@ -389,7 +389,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
48109 set_dev_node(&dev->dev, dev_to_node(bus->controller));
48110 dev->state = USB_STATE_ATTACHED;
48111 dev->lpm_disable_count = 1;
48112- atomic_set(&dev->urbnum, 0);
48113+ atomic_set_unchecked(&dev->urbnum, 0);
48114
48115 INIT_LIST_HEAD(&dev->ep0.urb_list);
48116 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
e2b79cd1
AF
48117diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
48118index f77083f..f3e2e34 100644
48119--- a/drivers/usb/dwc3/gadget.c
48120+++ b/drivers/usb/dwc3/gadget.c
48121@@ -550,8 +550,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
48122 if (!usb_endpoint_xfer_isoc(desc))
48123 return 0;
48124
48125- memset(&trb_link, 0, sizeof(trb_link));
48126-
48127 /* Link TRB for ISOC. The HWO bit is never reset */
48128 trb_st_hw = &dep->trb_pool[0];
48129
bb5f0bf8
AF
48130diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
48131index 5e29dde..eca992f 100644
48132--- a/drivers/usb/early/ehci-dbgp.c
48133+++ b/drivers/usb/early/ehci-dbgp.c
48134@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
48135
48136 #ifdef CONFIG_KGDB
48137 static struct kgdb_io kgdbdbgp_io_ops;
48138-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
48139+static struct kgdb_io kgdbdbgp_io_ops_console;
48140+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
48141 #else
48142 #define dbgp_kgdb_mode (0)
48143 #endif
48144@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
48145 .write_char = kgdbdbgp_write_char,
48146 };
48147
48148+static struct kgdb_io kgdbdbgp_io_ops_console = {
48149+ .name = "kgdbdbgp",
48150+ .read_char = kgdbdbgp_read_char,
48151+ .write_char = kgdbdbgp_write_char,
48152+ .is_console = 1
48153+};
48154+
48155 static int kgdbdbgp_wait_time;
48156
48157 static int __init kgdbdbgp_parse_config(char *str)
48158@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
48159 ptr++;
48160 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
48161 }
48162- kgdb_register_io_module(&kgdbdbgp_io_ops);
48163- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
48164+ if (early_dbgp_console.index != -1)
48165+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
48166+ else
48167+ kgdb_register_io_module(&kgdbdbgp_io_ops);
48168
48169 return 0;
48170 }
48171diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
48172index b369292..9f3ba40 100644
48173--- a/drivers/usb/gadget/u_serial.c
48174+++ b/drivers/usb/gadget/u_serial.c
48175@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
48176 spin_lock_irq(&port->port_lock);
48177
48178 /* already open? Great. */
48179- if (port->port.count) {
48180+ if (atomic_read(&port->port.count)) {
48181 status = 0;
48182- port->port.count++;
48183+ atomic_inc(&port->port.count);
48184
48185 /* currently opening/closing? wait ... */
48186 } else if (port->openclose) {
48187@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
48188 tty->driver_data = port;
48189 port->port.tty = tty;
48190
48191- port->port.count = 1;
48192+ atomic_set(&port->port.count, 1);
48193 port->openclose = false;
48194
48195 /* if connected, start the I/O stream */
48196@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
48197
48198 spin_lock_irq(&port->port_lock);
48199
48200- if (port->port.count != 1) {
48201- if (port->port.count == 0)
48202+ if (atomic_read(&port->port.count) != 1) {
48203+ if (atomic_read(&port->port.count) == 0)
48204 WARN_ON(1);
48205 else
48206- --port->port.count;
48207+ atomic_dec(&port->port.count);
48208 goto exit;
48209 }
48210
48211@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
48212 * and sleep if necessary
48213 */
48214 port->openclose = true;
48215- port->port.count = 0;
48216+ atomic_set(&port->port.count, 0);
48217
48218 gser = port->port_usb;
48219 if (gser && gser->disconnect)
48220@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
48221 int cond;
48222
48223 spin_lock_irq(&port->port_lock);
48224- cond = (port->port.count == 0) && !port->openclose;
48225+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
48226 spin_unlock_irq(&port->port_lock);
48227 return cond;
48228 }
48229@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
48230 /* if it's already open, start I/O ... and notify the serial
48231 * protocol about open/close status (connect/disconnect).
48232 */
48233- if (port->port.count) {
48234+ if (atomic_read(&port->port.count)) {
48235 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
48236 gs_start_io(port);
48237 if (gser->connect)
48238@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
48239
48240 port->port_usb = NULL;
48241 gser->ioport = NULL;
48242- if (port->port.count > 0 || port->openclose) {
48243+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
48244 wake_up_interruptible(&port->drain_wait);
48245 if (port->port.tty)
48246 tty_hangup(port->port.tty);
48247@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
48248
48249 /* finally, free any unused/unusable I/O buffers */
48250 spin_lock_irqsave(&port->port_lock, flags);
48251- if (port->port.count == 0 && !port->openclose)
48252+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
48253 gs_buf_free(&port->port_write_buf);
48254 gs_free_requests(gser->out, &port->read_pool, NULL);
48255 gs_free_requests(gser->out, &port->read_queue, NULL);
48256diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
48257index 5f3bcd3..bfca43f 100644
48258--- a/drivers/usb/serial/console.c
48259+++ b/drivers/usb/serial/console.c
48260@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
48261
48262 info->port = port;
48263
48264- ++port->port.count;
48265+ atomic_inc(&port->port.count);
48266 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
48267 if (serial->type->set_termios) {
48268 /*
48269@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
48270 }
48271 /* Now that any required fake tty operations are completed restore
48272 * the tty port count */
48273- --port->port.count;
48274+ atomic_dec(&port->port.count);
48275 /* The console is special in terms of closing the device so
48276 * indicate this port is now acting as a system console. */
48277 port->port.console = 1;
48278@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
48279 free_tty:
48280 kfree(tty);
48281 reset_open_count:
48282- port->port.count = 0;
48283+ atomic_set(&port->port.count, 0);
48284 usb_autopm_put_interface(serial->interface);
48285 error_get_interface:
48286 usb_serial_put(serial);
48287diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
48288index 75f70f0..d467e1a 100644
48289--- a/drivers/usb/storage/usb.h
48290+++ b/drivers/usb/storage/usb.h
48291@@ -63,7 +63,7 @@ struct us_unusual_dev {
48292 __u8 useProtocol;
48293 __u8 useTransport;
48294 int (*initFunction)(struct us_data *);
48295-};
48296+} __do_const;
48297
48298
48299 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
48300diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
48301index d6bea3e..60b250e 100644
48302--- a/drivers/usb/wusbcore/wa-hc.h
48303+++ b/drivers/usb/wusbcore/wa-hc.h
48304@@ -192,7 +192,7 @@ struct wahc {
48305 struct list_head xfer_delayed_list;
48306 spinlock_t xfer_list_lock;
48307 struct work_struct xfer_work;
48308- atomic_t xfer_id_count;
48309+ atomic_unchecked_t xfer_id_count;
48310 };
48311
48312
48313@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
48314 INIT_LIST_HEAD(&wa->xfer_delayed_list);
48315 spin_lock_init(&wa->xfer_list_lock);
48316 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
48317- atomic_set(&wa->xfer_id_count, 1);
48318+ atomic_set_unchecked(&wa->xfer_id_count, 1);
48319 }
48320
48321 /**
48322diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
48323index 028fc83..65bb105 100644
48324--- a/drivers/usb/wusbcore/wa-xfer.c
48325+++ b/drivers/usb/wusbcore/wa-xfer.c
48326@@ -296,7 +296,7 @@ out:
48327 */
48328 static void wa_xfer_id_init(struct wa_xfer *xfer)
48329 {
48330- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
48331+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
48332 }
48333
48334 /*
48335diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
48336index 5174eba..86e764a 100644
48337--- a/drivers/vhost/vringh.c
48338+++ b/drivers/vhost/vringh.c
48339@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
48340
48341 static inline int putu16_kern(u16 *p, u16 val)
48342 {
48343- ACCESS_ONCE(*p) = val;
48344+ ACCESS_ONCE_RW(*p) = val;
48345 return 0;
48346 }
48347
48348diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
48349index 8c55011..eed4ae1a 100644
48350--- a/drivers/video/aty/aty128fb.c
48351+++ b/drivers/video/aty/aty128fb.c
48352@@ -149,7 +149,7 @@ enum {
48353 };
48354
48355 /* Must match above enum */
48356-static char * const r128_family[] = {
48357+static const char * const r128_family[] = {
48358 "AGP",
48359 "PCI",
48360 "PRO AGP",
48361diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
48362index 4f27fdc..d3537e6 100644
48363--- a/drivers/video/aty/atyfb_base.c
48364+++ b/drivers/video/aty/atyfb_base.c
48365@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
48366 par->accel_flags = var->accel_flags; /* hack */
48367
48368 if (var->accel_flags) {
48369- info->fbops->fb_sync = atyfb_sync;
48370+ pax_open_kernel();
48371+ *(void **)&info->fbops->fb_sync = atyfb_sync;
48372+ pax_close_kernel();
48373 info->flags &= ~FBINFO_HWACCEL_DISABLED;
48374 } else {
48375- info->fbops->fb_sync = NULL;
48376+ pax_open_kernel();
48377+ *(void **)&info->fbops->fb_sync = NULL;
48378+ pax_close_kernel();
48379 info->flags |= FBINFO_HWACCEL_DISABLED;
48380 }
48381
48382diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
48383index 95ec042..e6affdd 100644
48384--- a/drivers/video/aty/mach64_cursor.c
48385+++ b/drivers/video/aty/mach64_cursor.c
48386@@ -7,6 +7,7 @@
48387 #include <linux/string.h>
48388
48389 #include <asm/io.h>
48390+#include <asm/pgtable.h>
48391
48392 #ifdef __sparc__
48393 #include <asm/fbio.h>
48394@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
48395 info->sprite.buf_align = 16; /* and 64 lines tall. */
48396 info->sprite.flags = FB_PIXMAP_IO;
48397
48398- info->fbops->fb_cursor = atyfb_cursor;
48399+ pax_open_kernel();
48400+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
48401+ pax_close_kernel();
48402
48403 return 0;
48404 }
48405diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
48406index c74e7aa..e3c2790 100644
48407--- a/drivers/video/backlight/backlight.c
48408+++ b/drivers/video/backlight/backlight.c
48409@@ -304,7 +304,7 @@ struct backlight_device *backlight_device_register(const char *name,
48410 new_bd->dev.class = backlight_class;
48411 new_bd->dev.parent = parent;
48412 new_bd->dev.release = bl_device_release;
48413- dev_set_name(&new_bd->dev, name);
48414+ dev_set_name(&new_bd->dev, "%s", name);
48415 dev_set_drvdata(&new_bd->dev, devdata);
48416
48417 /* Set default properties */
48418diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
48419index bca6ccc..252107e 100644
48420--- a/drivers/video/backlight/kb3886_bl.c
48421+++ b/drivers/video/backlight/kb3886_bl.c
48422@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
48423 static unsigned long kb3886bl_flags;
48424 #define KB3886BL_SUSPENDED 0x01
48425
48426-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
48427+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
48428 {
48429 .ident = "Sahara Touch-iT",
48430 .matches = {
48431diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
48432index 34fb6bd..3649fd9 100644
48433--- a/drivers/video/backlight/lcd.c
48434+++ b/drivers/video/backlight/lcd.c
48435@@ -219,7 +219,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
48436 new_ld->dev.class = lcd_class;
48437 new_ld->dev.parent = parent;
48438 new_ld->dev.release = lcd_device_release;
48439- dev_set_name(&new_ld->dev, name);
48440+ dev_set_name(&new_ld->dev, "%s", name);
48441 dev_set_drvdata(&new_ld->dev, devdata);
48442
48443 rc = device_register(&new_ld->dev);
48444diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
48445index 900aa4e..6d49418 100644
48446--- a/drivers/video/fb_defio.c
48447+++ b/drivers/video/fb_defio.c
48448@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
48449
48450 BUG_ON(!fbdefio);
48451 mutex_init(&fbdefio->lock);
48452- info->fbops->fb_mmap = fb_deferred_io_mmap;
48453+ pax_open_kernel();
48454+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
48455+ pax_close_kernel();
48456 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
48457 INIT_LIST_HEAD(&fbdefio->pagelist);
48458 if (fbdefio->delay == 0) /* set a default of 1 s */
48459@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
48460 page->mapping = NULL;
48461 }
48462
48463- info->fbops->fb_mmap = NULL;
48464+ *(void **)&info->fbops->fb_mmap = NULL;
48465 mutex_destroy(&fbdefio->lock);
48466 }
48467 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
48468diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
48469index 5c3960d..15cf8fc 100644
48470--- a/drivers/video/fbcmap.c
48471+++ b/drivers/video/fbcmap.c
48472@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
48473 rc = -ENODEV;
48474 goto out;
48475 }
48476- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
48477- !info->fbops->fb_setcmap)) {
48478+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
48479 rc = -EINVAL;
48480 goto out1;
48481 }
48482diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
48483index 098bfc6..796841d 100644
48484--- a/drivers/video/fbmem.c
48485+++ b/drivers/video/fbmem.c
48486@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
48487 image->dx += image->width + 8;
48488 }
48489 } else if (rotate == FB_ROTATE_UD) {
48490- for (x = 0; x < num && image->dx >= 0; x++) {
48491+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
48492 info->fbops->fb_imageblit(info, image);
48493 image->dx -= image->width + 8;
48494 }
48495@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
48496 image->dy += image->height + 8;
48497 }
48498 } else if (rotate == FB_ROTATE_CCW) {
48499- for (x = 0; x < num && image->dy >= 0; x++) {
48500+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
48501 info->fbops->fb_imageblit(info, image);
48502 image->dy -= image->height + 8;
48503 }
48504@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
48505 return -EFAULT;
48506 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
48507 return -EINVAL;
48508- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
48509+ if (con2fb.framebuffer >= FB_MAX)
48510 return -EINVAL;
48511 if (!registered_fb[con2fb.framebuffer])
48512 request_module("fb%d", con2fb.framebuffer);
48513diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
48514index 7672d2e..b56437f 100644
48515--- a/drivers/video/i810/i810_accel.c
48516+++ b/drivers/video/i810/i810_accel.c
48517@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
48518 }
48519 }
48520 printk("ringbuffer lockup!!!\n");
48521+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
48522 i810_report_error(mmio);
48523 par->dev_flags |= LOCKUP;
48524 info->pixmap.scan_align = 1;
48525diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
48526index 3c14e43..eafa544 100644
48527--- a/drivers/video/logo/logo_linux_clut224.ppm
48528+++ b/drivers/video/logo/logo_linux_clut224.ppm
48529@@ -1,1604 +1,1123 @@
48530 P3
48531-# Standard 224-color Linux logo
48532 80 80
48533 255
48534- 0 0 0 0 0 0 0 0 0 0 0 0
48535- 0 0 0 0 0 0 0 0 0 0 0 0
48536- 0 0 0 0 0 0 0 0 0 0 0 0
48537- 0 0 0 0 0 0 0 0 0 0 0 0
48538- 0 0 0 0 0 0 0 0 0 0 0 0
48539- 0 0 0 0 0 0 0 0 0 0 0 0
48540- 0 0 0 0 0 0 0 0 0 0 0 0
48541- 0 0 0 0 0 0 0 0 0 0 0 0
48542- 0 0 0 0 0 0 0 0 0 0 0 0
48543- 6 6 6 6 6 6 10 10 10 10 10 10
48544- 10 10 10 6 6 6 6 6 6 6 6 6
48545- 0 0 0 0 0 0 0 0 0 0 0 0
48546- 0 0 0 0 0 0 0 0 0 0 0 0
48547- 0 0 0 0 0 0 0 0 0 0 0 0
48548- 0 0 0 0 0 0 0 0 0 0 0 0
48549- 0 0 0 0 0 0 0 0 0 0 0 0
48550- 0 0 0 0 0 0 0 0 0 0 0 0
48551- 0 0 0 0 0 0 0 0 0 0 0 0
48552- 0 0 0 0 0 0 0 0 0 0 0 0
48553- 0 0 0 0 0 0 0 0 0 0 0 0
48554- 0 0 0 0 0 0 0 0 0 0 0 0
48555- 0 0 0 0 0 0 0 0 0 0 0 0
48556- 0 0 0 0 0 0 0 0 0 0 0 0
48557- 0 0 0 0 0 0 0 0 0 0 0 0
48558- 0 0 0 0 0 0 0 0 0 0 0 0
48559- 0 0 0 0 0 0 0 0 0 0 0 0
48560- 0 0 0 0 0 0 0 0 0 0 0 0
48561- 0 0 0 0 0 0 0 0 0 0 0 0
48562- 0 0 0 6 6 6 10 10 10 14 14 14
48563- 22 22 22 26 26 26 30 30 30 34 34 34
48564- 30 30 30 30 30 30 26 26 26 18 18 18
48565- 14 14 14 10 10 10 6 6 6 0 0 0
48566- 0 0 0 0 0 0 0 0 0 0 0 0
48567- 0 0 0 0 0 0 0 0 0 0 0 0
48568- 0 0 0 0 0 0 0 0 0 0 0 0
48569- 0 0 0 0 0 0 0 0 0 0 0 0
48570- 0 0 0 0 0 0 0 0 0 0 0 0
48571- 0 0 0 0 0 0 0 0 0 0 0 0
48572- 0 0 0 0 0 0 0 0 0 0 0 0
48573- 0 0 0 0 0 0 0 0 0 0 0 0
48574- 0 0 0 0 0 0 0 0 0 0 0 0
48575- 0 0 0 0 0 1 0 0 1 0 0 0
48576- 0 0 0 0 0 0 0 0 0 0 0 0
48577- 0 0 0 0 0 0 0 0 0 0 0 0
48578- 0 0 0 0 0 0 0 0 0 0 0 0
48579- 0 0 0 0 0 0 0 0 0 0 0 0
48580- 0 0 0 0 0 0 0 0 0 0 0 0
48581- 0 0 0 0 0 0 0 0 0 0 0 0
48582- 6 6 6 14 14 14 26 26 26 42 42 42
48583- 54 54 54 66 66 66 78 78 78 78 78 78
48584- 78 78 78 74 74 74 66 66 66 54 54 54
48585- 42 42 42 26 26 26 18 18 18 10 10 10
48586- 6 6 6 0 0 0 0 0 0 0 0 0
48587- 0 0 0 0 0 0 0 0 0 0 0 0
48588- 0 0 0 0 0 0 0 0 0 0 0 0
48589- 0 0 0 0 0 0 0 0 0 0 0 0
48590- 0 0 0 0 0 0 0 0 0 0 0 0
48591- 0 0 0 0 0 0 0 0 0 0 0 0
48592- 0 0 0 0 0 0 0 0 0 0 0 0
48593- 0 0 0 0 0 0 0 0 0 0 0 0
48594- 0 0 0 0 0 0 0 0 0 0 0 0
48595- 0 0 1 0 0 0 0 0 0 0 0 0
48596- 0 0 0 0 0 0 0 0 0 0 0 0
48597- 0 0 0 0 0 0 0 0 0 0 0 0
48598- 0 0 0 0 0 0 0 0 0 0 0 0
48599- 0 0 0 0 0 0 0 0 0 0 0 0
48600- 0 0 0 0 0 0 0 0 0 0 0 0
48601- 0 0 0 0 0 0 0 0 0 10 10 10
48602- 22 22 22 42 42 42 66 66 66 86 86 86
48603- 66 66 66 38 38 38 38 38 38 22 22 22
48604- 26 26 26 34 34 34 54 54 54 66 66 66
48605- 86 86 86 70 70 70 46 46 46 26 26 26
48606- 14 14 14 6 6 6 0 0 0 0 0 0
48607- 0 0 0 0 0 0 0 0 0 0 0 0
48608- 0 0 0 0 0 0 0 0 0 0 0 0
48609- 0 0 0 0 0 0 0 0 0 0 0 0
48610- 0 0 0 0 0 0 0 0 0 0 0 0
48611- 0 0 0 0 0 0 0 0 0 0 0 0
48612- 0 0 0 0 0 0 0 0 0 0 0 0
48613- 0 0 0 0 0 0 0 0 0 0 0 0
48614- 0 0 0 0 0 0 0 0 0 0 0 0
48615- 0 0 1 0 0 1 0 0 1 0 0 0
48616- 0 0 0 0 0 0 0 0 0 0 0 0
48617- 0 0 0 0 0 0 0 0 0 0 0 0
48618- 0 0 0 0 0 0 0 0 0 0 0 0
48619- 0 0 0 0 0 0 0 0 0 0 0 0
48620- 0 0 0 0 0 0 0 0 0 0 0 0
48621- 0 0 0 0 0 0 10 10 10 26 26 26
48622- 50 50 50 82 82 82 58 58 58 6 6 6
48623- 2 2 6 2 2 6 2 2 6 2 2 6
48624- 2 2 6 2 2 6 2 2 6 2 2 6
48625- 6 6 6 54 54 54 86 86 86 66 66 66
48626- 38 38 38 18 18 18 6 6 6 0 0 0
48627- 0 0 0 0 0 0 0 0 0 0 0 0
48628- 0 0 0 0 0 0 0 0 0 0 0 0
48629- 0 0 0 0 0 0 0 0 0 0 0 0
48630- 0 0 0 0 0 0 0 0 0 0 0 0
48631- 0 0 0 0 0 0 0 0 0 0 0 0
48632- 0 0 0 0 0 0 0 0 0 0 0 0
48633- 0 0 0 0 0 0 0 0 0 0 0 0
48634- 0 0 0 0 0 0 0 0 0 0 0 0
48635- 0 0 0 0 0 0 0 0 0 0 0 0
48636- 0 0 0 0 0 0 0 0 0 0 0 0
48637- 0 0 0 0 0 0 0 0 0 0 0 0
48638- 0 0 0 0 0 0 0 0 0 0 0 0
48639- 0 0 0 0 0 0 0 0 0 0 0 0
48640- 0 0 0 0 0 0 0 0 0 0 0 0
48641- 0 0 0 6 6 6 22 22 22 50 50 50
48642- 78 78 78 34 34 34 2 2 6 2 2 6
48643- 2 2 6 2 2 6 2 2 6 2 2 6
48644- 2 2 6 2 2 6 2 2 6 2 2 6
48645- 2 2 6 2 2 6 6 6 6 70 70 70
48646- 78 78 78 46 46 46 22 22 22 6 6 6
48647- 0 0 0 0 0 0 0 0 0 0 0 0
48648- 0 0 0 0 0 0 0 0 0 0 0 0
48649- 0 0 0 0 0 0 0 0 0 0 0 0
48650- 0 0 0 0 0 0 0 0 0 0 0 0
48651- 0 0 0 0 0 0 0 0 0 0 0 0
48652- 0 0 0 0 0 0 0 0 0 0 0 0
48653- 0 0 0 0 0 0 0 0 0 0 0 0
48654- 0 0 0 0 0 0 0 0 0 0 0 0
48655- 0 0 1 0 0 1 0 0 1 0 0 0
48656- 0 0 0 0 0 0 0 0 0 0 0 0
48657- 0 0 0 0 0 0 0 0 0 0 0 0
48658- 0 0 0 0 0 0 0 0 0 0 0 0
48659- 0 0 0 0 0 0 0 0 0 0 0 0
48660- 0 0 0 0 0 0 0 0 0 0 0 0
48661- 6 6 6 18 18 18 42 42 42 82 82 82
48662- 26 26 26 2 2 6 2 2 6 2 2 6
48663- 2 2 6 2 2 6 2 2 6 2 2 6
48664- 2 2 6 2 2 6 2 2 6 14 14 14
48665- 46 46 46 34 34 34 6 6 6 2 2 6
48666- 42 42 42 78 78 78 42 42 42 18 18 18
48667- 6 6 6 0 0 0 0 0 0 0 0 0
48668- 0 0 0 0 0 0 0 0 0 0 0 0
48669- 0 0 0 0 0 0 0 0 0 0 0 0
48670- 0 0 0 0 0 0 0 0 0 0 0 0
48671- 0 0 0 0 0 0 0 0 0 0 0 0
48672- 0 0 0 0 0 0 0 0 0 0 0 0
48673- 0 0 0 0 0 0 0 0 0 0 0 0
48674- 0 0 0 0 0 0 0 0 0 0 0 0
48675- 0 0 1 0 0 0 0 0 1 0 0 0
48676- 0 0 0 0 0 0 0 0 0 0 0 0
48677- 0 0 0 0 0 0 0 0 0 0 0 0
48678- 0 0 0 0 0 0 0 0 0 0 0 0
48679- 0 0 0 0 0 0 0 0 0 0 0 0
48680- 0 0 0 0 0 0 0 0 0 0 0 0
48681- 10 10 10 30 30 30 66 66 66 58 58 58
48682- 2 2 6 2 2 6 2 2 6 2 2 6
48683- 2 2 6 2 2 6 2 2 6 2 2 6
48684- 2 2 6 2 2 6 2 2 6 26 26 26
48685- 86 86 86 101 101 101 46 46 46 10 10 10
48686- 2 2 6 58 58 58 70 70 70 34 34 34
48687- 10 10 10 0 0 0 0 0 0 0 0 0
48688- 0 0 0 0 0 0 0 0 0 0 0 0
48689- 0 0 0 0 0 0 0 0 0 0 0 0
48690- 0 0 0 0 0 0 0 0 0 0 0 0
48691- 0 0 0 0 0 0 0 0 0 0 0 0
48692- 0 0 0 0 0 0 0 0 0 0 0 0
48693- 0 0 0 0 0 0 0 0 0 0 0 0
48694- 0 0 0 0 0 0 0 0 0 0 0 0
48695- 0 0 1 0 0 1 0 0 1 0 0 0
48696- 0 0 0 0 0 0 0 0 0 0 0 0
48697- 0 0 0 0 0 0 0 0 0 0 0 0
48698- 0 0 0 0 0 0 0 0 0 0 0 0
48699- 0 0 0 0 0 0 0 0 0 0 0 0
48700- 0 0 0 0 0 0 0 0 0 0 0 0
48701- 14 14 14 42 42 42 86 86 86 10 10 10
48702- 2 2 6 2 2 6 2 2 6 2 2 6
48703- 2 2 6 2 2 6 2 2 6 2 2 6
48704- 2 2 6 2 2 6 2 2 6 30 30 30
48705- 94 94 94 94 94 94 58 58 58 26 26 26
48706- 2 2 6 6 6 6 78 78 78 54 54 54
48707- 22 22 22 6 6 6 0 0 0 0 0 0
48708- 0 0 0 0 0 0 0 0 0 0 0 0
48709- 0 0 0 0 0 0 0 0 0 0 0 0
48710- 0 0 0 0 0 0 0 0 0 0 0 0
48711- 0 0 0 0 0 0 0 0 0 0 0 0
48712- 0 0 0 0 0 0 0 0 0 0 0 0
48713- 0 0 0 0 0 0 0 0 0 0 0 0
48714- 0 0 0 0 0 0 0 0 0 0 0 0
48715- 0 0 0 0 0 0 0 0 0 0 0 0
48716- 0 0 0 0 0 0 0 0 0 0 0 0
48717- 0 0 0 0 0 0 0 0 0 0 0 0
48718- 0 0 0 0 0 0 0 0 0 0 0 0
48719- 0 0 0 0 0 0 0 0 0 0 0 0
48720- 0 0 0 0 0 0 0 0 0 6 6 6
48721- 22 22 22 62 62 62 62 62 62 2 2 6
48722- 2 2 6 2 2 6 2 2 6 2 2 6
48723- 2 2 6 2 2 6 2 2 6 2 2 6
48724- 2 2 6 2 2 6 2 2 6 26 26 26
48725- 54 54 54 38 38 38 18 18 18 10 10 10
48726- 2 2 6 2 2 6 34 34 34 82 82 82
48727- 38 38 38 14 14 14 0 0 0 0 0 0
48728- 0 0 0 0 0 0 0 0 0 0 0 0
48729- 0 0 0 0 0 0 0 0 0 0 0 0
48730- 0 0 0 0 0 0 0 0 0 0 0 0
48731- 0 0 0 0 0 0 0 0 0 0 0 0
48732- 0 0 0 0 0 0 0 0 0 0 0 0
48733- 0 0 0 0 0 0 0 0 0 0 0 0
48734- 0 0 0 0 0 0 0 0 0 0 0 0
48735- 0 0 0 0 0 1 0 0 1 0 0 0
48736- 0 0 0 0 0 0 0 0 0 0 0 0
48737- 0 0 0 0 0 0 0 0 0 0 0 0
48738- 0 0 0 0 0 0 0 0 0 0 0 0
48739- 0 0 0 0 0 0 0 0 0 0 0 0
48740- 0 0 0 0 0 0 0 0 0 6 6 6
48741- 30 30 30 78 78 78 30 30 30 2 2 6
48742- 2 2 6 2 2 6 2 2 6 2 2 6
48743- 2 2 6 2 2 6 2 2 6 2 2 6
48744- 2 2 6 2 2 6 2 2 6 10 10 10
48745- 10 10 10 2 2 6 2 2 6 2 2 6
48746- 2 2 6 2 2 6 2 2 6 78 78 78
48747- 50 50 50 18 18 18 6 6 6 0 0 0
48748- 0 0 0 0 0 0 0 0 0 0 0 0
48749- 0 0 0 0 0 0 0 0 0 0 0 0
48750- 0 0 0 0 0 0 0 0 0 0 0 0
48751- 0 0 0 0 0 0 0 0 0 0 0 0
48752- 0 0 0 0 0 0 0 0 0 0 0 0
48753- 0 0 0 0 0 0 0 0 0 0 0 0
48754- 0 0 0 0 0 0 0 0 0 0 0 0
48755- 0 0 1 0 0 0 0 0 0 0 0 0
48756- 0 0 0 0 0 0 0 0 0 0 0 0
48757- 0 0 0 0 0 0 0 0 0 0 0 0
48758- 0 0 0 0 0 0 0 0 0 0 0 0
48759- 0 0 0 0 0 0 0 0 0 0 0 0
48760- 0 0 0 0 0 0 0 0 0 10 10 10
48761- 38 38 38 86 86 86 14 14 14 2 2 6
48762- 2 2 6 2 2 6 2 2 6 2 2 6
48763- 2 2 6 2 2 6 2 2 6 2 2 6
48764- 2 2 6 2 2 6 2 2 6 2 2 6
48765- 2 2 6 2 2 6 2 2 6 2 2 6
48766- 2 2 6 2 2 6 2 2 6 54 54 54
48767- 66 66 66 26 26 26 6 6 6 0 0 0
48768- 0 0 0 0 0 0 0 0 0 0 0 0
48769- 0 0 0 0 0 0 0 0 0 0 0 0
48770- 0 0 0 0 0 0 0 0 0 0 0 0
48771- 0 0 0 0 0 0 0 0 0 0 0 0
48772- 0 0 0 0 0 0 0 0 0 0 0 0
48773- 0 0 0 0 0 0 0 0 0 0 0 0
48774- 0 0 0 0 0 0 0 0 0 0 0 0
48775- 0 0 0 0 0 1 0 0 1 0 0 0
48776- 0 0 0 0 0 0 0 0 0 0 0 0
48777- 0 0 0 0 0 0 0 0 0 0 0 0
48778- 0 0 0 0 0 0 0 0 0 0 0 0
48779- 0 0 0 0 0 0 0 0 0 0 0 0
48780- 0 0 0 0 0 0 0 0 0 14 14 14
48781- 42 42 42 82 82 82 2 2 6 2 2 6
48782- 2 2 6 6 6 6 10 10 10 2 2 6
48783- 2 2 6 2 2 6 2 2 6 2 2 6
48784- 2 2 6 2 2 6 2 2 6 6 6 6
48785- 14 14 14 10 10 10 2 2 6 2 2 6
48786- 2 2 6 2 2 6 2 2 6 18 18 18
48787- 82 82 82 34 34 34 10 10 10 0 0 0
48788- 0 0 0 0 0 0 0 0 0 0 0 0
48789- 0 0 0 0 0 0 0 0 0 0 0 0
48790- 0 0 0 0 0 0 0 0 0 0 0 0
48791- 0 0 0 0 0 0 0 0 0 0 0 0
48792- 0 0 0 0 0 0 0 0 0 0 0 0
48793- 0 0 0 0 0 0 0 0 0 0 0 0
48794- 0 0 0 0 0 0 0 0 0 0 0 0
48795- 0 0 1 0 0 0 0 0 0 0 0 0
48796- 0 0 0 0 0 0 0 0 0 0 0 0
48797- 0 0 0 0 0 0 0 0 0 0 0 0
48798- 0 0 0 0 0 0 0 0 0 0 0 0
48799- 0 0 0 0 0 0 0 0 0 0 0 0
48800- 0 0 0 0 0 0 0 0 0 14 14 14
48801- 46 46 46 86 86 86 2 2 6 2 2 6
48802- 6 6 6 6 6 6 22 22 22 34 34 34
48803- 6 6 6 2 2 6 2 2 6 2 2 6
48804- 2 2 6 2 2 6 18 18 18 34 34 34
48805- 10 10 10 50 50 50 22 22 22 2 2 6
48806- 2 2 6 2 2 6 2 2 6 10 10 10
48807- 86 86 86 42 42 42 14 14 14 0 0 0
48808- 0 0 0 0 0 0 0 0 0 0 0 0
48809- 0 0 0 0 0 0 0 0 0 0 0 0
48810- 0 0 0 0 0 0 0 0 0 0 0 0
48811- 0 0 0 0 0 0 0 0 0 0 0 0
48812- 0 0 0 0 0 0 0 0 0 0 0 0
48813- 0 0 0 0 0 0 0 0 0 0 0 0
48814- 0 0 0 0 0 0 0 0 0 0 0 0
48815- 0 0 1 0 0 1 0 0 1 0 0 0
48816- 0 0 0 0 0 0 0 0 0 0 0 0
48817- 0 0 0 0 0 0 0 0 0 0 0 0
48818- 0 0 0 0 0 0 0 0 0 0 0 0
48819- 0 0 0 0 0 0 0 0 0 0 0 0
48820- 0 0 0 0 0 0 0 0 0 14 14 14
48821- 46 46 46 86 86 86 2 2 6 2 2 6
48822- 38 38 38 116 116 116 94 94 94 22 22 22
48823- 22 22 22 2 2 6 2 2 6 2 2 6
48824- 14 14 14 86 86 86 138 138 138 162 162 162
48825-154 154 154 38 38 38 26 26 26 6 6 6
48826- 2 2 6 2 2 6 2 2 6 2 2 6
48827- 86 86 86 46 46 46 14 14 14 0 0 0
48828- 0 0 0 0 0 0 0 0 0 0 0 0
48829- 0 0 0 0 0 0 0 0 0 0 0 0
48830- 0 0 0 0 0 0 0 0 0 0 0 0
48831- 0 0 0 0 0 0 0 0 0 0 0 0
48832- 0 0 0 0 0 0 0 0 0 0 0 0
48833- 0 0 0 0 0 0 0 0 0 0 0 0
48834- 0 0 0 0 0 0 0 0 0 0 0 0
48835- 0 0 0 0 0 0 0 0 0 0 0 0
48836- 0 0 0 0 0 0 0 0 0 0 0 0
48837- 0 0 0 0 0 0 0 0 0 0 0 0
48838- 0 0 0 0 0 0 0 0 0 0 0 0
48839- 0 0 0 0 0 0 0 0 0 0 0 0
48840- 0 0 0 0 0 0 0 0 0 14 14 14
48841- 46 46 46 86 86 86 2 2 6 14 14 14
48842-134 134 134 198 198 198 195 195 195 116 116 116
48843- 10 10 10 2 2 6 2 2 6 6 6 6
48844-101 98 89 187 187 187 210 210 210 218 218 218
48845-214 214 214 134 134 134 14 14 14 6 6 6
48846- 2 2 6 2 2 6 2 2 6 2 2 6
48847- 86 86 86 50 50 50 18 18 18 6 6 6
48848- 0 0 0 0 0 0 0 0 0 0 0 0
48849- 0 0 0 0 0 0 0 0 0 0 0 0
48850- 0 0 0 0 0 0 0 0 0 0 0 0
48851- 0 0 0 0 0 0 0 0 0 0 0 0
48852- 0 0 0 0 0 0 0 0 0 0 0 0
48853- 0 0 0 0 0 0 0 0 0 0 0 0
48854- 0 0 0 0 0 0 0 0 1 0 0 0
48855- 0 0 1 0 0 1 0 0 1 0 0 0
48856- 0 0 0 0 0 0 0 0 0 0 0 0
48857- 0 0 0 0 0 0 0 0 0 0 0 0
48858- 0 0 0 0 0 0 0 0 0 0 0 0
48859- 0 0 0 0 0 0 0 0 0 0 0 0
48860- 0 0 0 0 0 0 0 0 0 14 14 14
48861- 46 46 46 86 86 86 2 2 6 54 54 54
48862-218 218 218 195 195 195 226 226 226 246 246 246
48863- 58 58 58 2 2 6 2 2 6 30 30 30
48864-210 210 210 253 253 253 174 174 174 123 123 123
48865-221 221 221 234 234 234 74 74 74 2 2 6
48866- 2 2 6 2 2 6 2 2 6 2 2 6
48867- 70 70 70 58 58 58 22 22 22 6 6 6
48868- 0 0 0 0 0 0 0 0 0 0 0 0
48869- 0 0 0 0 0 0 0 0 0 0 0 0
48870- 0 0 0 0 0 0 0 0 0 0 0 0
48871- 0 0 0 0 0 0 0 0 0 0 0 0
48872- 0 0 0 0 0 0 0 0 0 0 0 0
48873- 0 0 0 0 0 0 0 0 0 0 0 0
48874- 0 0 0 0 0 0 0 0 0 0 0 0
48875- 0 0 0 0 0 0 0 0 0 0 0 0
48876- 0 0 0 0 0 0 0 0 0 0 0 0
48877- 0 0 0 0 0 0 0 0 0 0 0 0
48878- 0 0 0 0 0 0 0 0 0 0 0 0
48879- 0 0 0 0 0 0 0 0 0 0 0 0
48880- 0 0 0 0 0 0 0 0 0 14 14 14
48881- 46 46 46 82 82 82 2 2 6 106 106 106
48882-170 170 170 26 26 26 86 86 86 226 226 226
48883-123 123 123 10 10 10 14 14 14 46 46 46
48884-231 231 231 190 190 190 6 6 6 70 70 70
48885- 90 90 90 238 238 238 158 158 158 2 2 6
48886- 2 2 6 2 2 6 2 2 6 2 2 6
48887- 70 70 70 58 58 58 22 22 22 6 6 6
48888- 0 0 0 0 0 0 0 0 0 0 0 0
48889- 0 0 0 0 0 0 0 0 0 0 0 0
48890- 0 0 0 0 0 0 0 0 0 0 0 0
48891- 0 0 0 0 0 0 0 0 0 0 0 0
48892- 0 0 0 0 0 0 0 0 0 0 0 0
48893- 0 0 0 0 0 0 0 0 0 0 0 0
48894- 0 0 0 0 0 0 0 0 1 0 0 0
48895- 0 0 1 0 0 1 0 0 1 0 0 0
48896- 0 0 0 0 0 0 0 0 0 0 0 0
48897- 0 0 0 0 0 0 0 0 0 0 0 0
48898- 0 0 0 0 0 0 0 0 0 0 0 0
48899- 0 0 0 0 0 0 0 0 0 0 0 0
48900- 0 0 0 0 0 0 0 0 0 14 14 14
48901- 42 42 42 86 86 86 6 6 6 116 116 116
48902-106 106 106 6 6 6 70 70 70 149 149 149
48903-128 128 128 18 18 18 38 38 38 54 54 54
48904-221 221 221 106 106 106 2 2 6 14 14 14
48905- 46 46 46 190 190 190 198 198 198 2 2 6
48906- 2 2 6 2 2 6 2 2 6 2 2 6
48907- 74 74 74 62 62 62 22 22 22 6 6 6
48908- 0 0 0 0 0 0 0 0 0 0 0 0
48909- 0 0 0 0 0 0 0 0 0 0 0 0
48910- 0 0 0 0 0 0 0 0 0 0 0 0
48911- 0 0 0 0 0 0 0 0 0 0 0 0
48912- 0 0 0 0 0 0 0 0 0 0 0 0
48913- 0 0 0 0 0 0 0 0 0 0 0 0
48914- 0 0 0 0 0 0 0 0 1 0 0 0
48915- 0 0 1 0 0 0 0 0 1 0 0 0
48916- 0 0 0 0 0 0 0 0 0 0 0 0
48917- 0 0 0 0 0 0 0 0 0 0 0 0
48918- 0 0 0 0 0 0 0 0 0 0 0 0
48919- 0 0 0 0 0 0 0 0 0 0 0 0
48920- 0 0 0 0 0 0 0 0 0 14 14 14
48921- 42 42 42 94 94 94 14 14 14 101 101 101
48922-128 128 128 2 2 6 18 18 18 116 116 116
48923-118 98 46 121 92 8 121 92 8 98 78 10
48924-162 162 162 106 106 106 2 2 6 2 2 6
48925- 2 2 6 195 195 195 195 195 195 6 6 6
48926- 2 2 6 2 2 6 2 2 6 2 2 6
48927- 74 74 74 62 62 62 22 22 22 6 6 6
48928- 0 0 0 0 0 0 0 0 0 0 0 0
48929- 0 0 0 0 0 0 0 0 0 0 0 0
48930- 0 0 0 0 0 0 0 0 0 0 0 0
48931- 0 0 0 0 0 0 0 0 0 0 0 0
48932- 0 0 0 0 0 0 0 0 0 0 0 0
48933- 0 0 0 0 0 0 0 0 0 0 0 0
48934- 0 0 0 0 0 0 0 0 1 0 0 1
48935- 0 0 1 0 0 0 0 0 1 0 0 0
48936- 0 0 0 0 0 0 0 0 0 0 0 0
48937- 0 0 0 0 0 0 0 0 0 0 0 0
48938- 0 0 0 0 0 0 0 0 0 0 0 0
48939- 0 0 0 0 0 0 0 0 0 0 0 0
48940- 0 0 0 0 0 0 0 0 0 10 10 10
48941- 38 38 38 90 90 90 14 14 14 58 58 58
48942-210 210 210 26 26 26 54 38 6 154 114 10
48943-226 170 11 236 186 11 225 175 15 184 144 12
48944-215 174 15 175 146 61 37 26 9 2 2 6
48945- 70 70 70 246 246 246 138 138 138 2 2 6
48946- 2 2 6 2 2 6 2 2 6 2 2 6
48947- 70 70 70 66 66 66 26 26 26 6 6 6
48948- 0 0 0 0 0 0 0 0 0 0 0 0
48949- 0 0 0 0 0 0 0 0 0 0 0 0
48950- 0 0 0 0 0 0 0 0 0 0 0 0
48951- 0 0 0 0 0 0 0 0 0 0 0 0
48952- 0 0 0 0 0 0 0 0 0 0 0 0
48953- 0 0 0 0 0 0 0 0 0 0 0 0
48954- 0 0 0 0 0 0 0 0 0 0 0 0
48955- 0 0 0 0 0 0 0 0 0 0 0 0
48956- 0 0 0 0 0 0 0 0 0 0 0 0
48957- 0 0 0 0 0 0 0 0 0 0 0 0
48958- 0 0 0 0 0 0 0 0 0 0 0 0
48959- 0 0 0 0 0 0 0 0 0 0 0 0
48960- 0 0 0 0 0 0 0 0 0 10 10 10
48961- 38 38 38 86 86 86 14 14 14 10 10 10
48962-195 195 195 188 164 115 192 133 9 225 175 15
48963-239 182 13 234 190 10 232 195 16 232 200 30
48964-245 207 45 241 208 19 232 195 16 184 144 12
48965-218 194 134 211 206 186 42 42 42 2 2 6
48966- 2 2 6 2 2 6 2 2 6 2 2 6
48967- 50 50 50 74 74 74 30 30 30 6 6 6
48968- 0 0 0 0 0 0 0 0 0 0 0 0
48969- 0 0 0 0 0 0 0 0 0 0 0 0
48970- 0 0 0 0 0 0 0 0 0 0 0 0
48971- 0 0 0 0 0 0 0 0 0 0 0 0
48972- 0 0 0 0 0 0 0 0 0 0 0 0
48973- 0 0 0 0 0 0 0 0 0 0 0 0
48974- 0 0 0 0 0 0 0 0 0 0 0 0
48975- 0 0 0 0 0 0 0 0 0 0 0 0
48976- 0 0 0 0 0 0 0 0 0 0 0 0
48977- 0 0 0 0 0 0 0 0 0 0 0 0
48978- 0 0 0 0 0 0 0 0 0 0 0 0
48979- 0 0 0 0 0 0 0 0 0 0 0 0
48980- 0 0 0 0 0 0 0 0 0 10 10 10
48981- 34 34 34 86 86 86 14 14 14 2 2 6
48982-121 87 25 192 133 9 219 162 10 239 182 13
48983-236 186 11 232 195 16 241 208 19 244 214 54
48984-246 218 60 246 218 38 246 215 20 241 208 19
48985-241 208 19 226 184 13 121 87 25 2 2 6
48986- 2 2 6 2 2 6 2 2 6 2 2 6
48987- 50 50 50 82 82 82 34 34 34 10 10 10
48988- 0 0 0 0 0 0 0 0 0 0 0 0
48989- 0 0 0 0 0 0 0 0 0 0 0 0
48990- 0 0 0 0 0 0 0 0 0 0 0 0
48991- 0 0 0 0 0 0 0 0 0 0 0 0
48992- 0 0 0 0 0 0 0 0 0 0 0 0
48993- 0 0 0 0 0 0 0 0 0 0 0 0
48994- 0 0 0 0 0 0 0 0 0 0 0 0
48995- 0 0 0 0 0 0 0 0 0 0 0 0
48996- 0 0 0 0 0 0 0 0 0 0 0 0
48997- 0 0 0 0 0 0 0 0 0 0 0 0
48998- 0 0 0 0 0 0 0 0 0 0 0 0
48999- 0 0 0 0 0 0 0 0 0 0 0 0
49000- 0 0 0 0 0 0 0 0 0 10 10 10
49001- 34 34 34 82 82 82 30 30 30 61 42 6
49002-180 123 7 206 145 10 230 174 11 239 182 13
49003-234 190 10 238 202 15 241 208 19 246 218 74
49004-246 218 38 246 215 20 246 215 20 246 215 20
49005-226 184 13 215 174 15 184 144 12 6 6 6
49006- 2 2 6 2 2 6 2 2 6 2 2 6
49007- 26 26 26 94 94 94 42 42 42 14 14 14
49008- 0 0 0 0 0 0 0 0 0 0 0 0
49009- 0 0 0 0 0 0 0 0 0 0 0 0
49010- 0 0 0 0 0 0 0 0 0 0 0 0
49011- 0 0 0 0 0 0 0 0 0 0 0 0
49012- 0 0 0 0 0 0 0 0 0 0 0 0
49013- 0 0 0 0 0 0 0 0 0 0 0 0
49014- 0 0 0 0 0 0 0 0 0 0 0 0
49015- 0 0 0 0 0 0 0 0 0 0 0 0
49016- 0 0 0 0 0 0 0 0 0 0 0 0
49017- 0 0 0 0 0 0 0 0 0 0 0 0
49018- 0 0 0 0 0 0 0 0 0 0 0 0
49019- 0 0 0 0 0 0 0 0 0 0 0 0
49020- 0 0 0 0 0 0 0 0 0 10 10 10
49021- 30 30 30 78 78 78 50 50 50 104 69 6
49022-192 133 9 216 158 10 236 178 12 236 186 11
49023-232 195 16 241 208 19 244 214 54 245 215 43
49024-246 215 20 246 215 20 241 208 19 198 155 10
49025-200 144 11 216 158 10 156 118 10 2 2 6
49026- 2 2 6 2 2 6 2 2 6 2 2 6
49027- 6 6 6 90 90 90 54 54 54 18 18 18
49028- 6 6 6 0 0 0 0 0 0 0 0 0
49029- 0 0 0 0 0 0 0 0 0 0 0 0
49030- 0 0 0 0 0 0 0 0 0 0 0 0
49031- 0 0 0 0 0 0 0 0 0 0 0 0
49032- 0 0 0 0 0 0 0 0 0 0 0 0
49033- 0 0 0 0 0 0 0 0 0 0 0 0
49034- 0 0 0 0 0 0 0 0 0 0 0 0
49035- 0 0 0 0 0 0 0 0 0 0 0 0
49036- 0 0 0 0 0 0 0 0 0 0 0 0
49037- 0 0 0 0 0 0 0 0 0 0 0 0
49038- 0 0 0 0 0 0 0 0 0 0 0 0
49039- 0 0 0 0 0 0 0 0 0 0 0 0
49040- 0 0 0 0 0 0 0 0 0 10 10 10
49041- 30 30 30 78 78 78 46 46 46 22 22 22
49042-137 92 6 210 162 10 239 182 13 238 190 10
49043-238 202 15 241 208 19 246 215 20 246 215 20
49044-241 208 19 203 166 17 185 133 11 210 150 10
49045-216 158 10 210 150 10 102 78 10 2 2 6
49046- 6 6 6 54 54 54 14 14 14 2 2 6
49047- 2 2 6 62 62 62 74 74 74 30 30 30
49048- 10 10 10 0 0 0 0 0 0 0 0 0
49049- 0 0 0 0 0 0 0 0 0 0 0 0
49050- 0 0 0 0 0 0 0 0 0 0 0 0
49051- 0 0 0 0 0 0 0 0 0 0 0 0
49052- 0 0 0 0 0 0 0 0 0 0 0 0
49053- 0 0 0 0 0 0 0 0 0 0 0 0
49054- 0 0 0 0 0 0 0 0 0 0 0 0
49055- 0 0 0 0 0 0 0 0 0 0 0 0
49056- 0 0 0 0 0 0 0 0 0 0 0 0
49057- 0 0 0 0 0 0 0 0 0 0 0 0
49058- 0 0 0 0 0 0 0 0 0 0 0 0
49059- 0 0 0 0 0 0 0 0 0 0 0 0
49060- 0 0 0 0 0 0 0 0 0 10 10 10
49061- 34 34 34 78 78 78 50 50 50 6 6 6
49062- 94 70 30 139 102 15 190 146 13 226 184 13
49063-232 200 30 232 195 16 215 174 15 190 146 13
49064-168 122 10 192 133 9 210 150 10 213 154 11
49065-202 150 34 182 157 106 101 98 89 2 2 6
49066- 2 2 6 78 78 78 116 116 116 58 58 58
49067- 2 2 6 22 22 22 90 90 90 46 46 46
49068- 18 18 18 6 6 6 0 0 0 0 0 0
49069- 0 0 0 0 0 0 0 0 0 0 0 0
49070- 0 0 0 0 0 0 0 0 0 0 0 0
49071- 0 0 0 0 0 0 0 0 0 0 0 0
49072- 0 0 0 0 0 0 0 0 0 0 0 0
49073- 0 0 0 0 0 0 0 0 0 0 0 0
49074- 0 0 0 0 0 0 0 0 0 0 0 0
49075- 0 0 0 0 0 0 0 0 0 0 0 0
49076- 0 0 0 0 0 0 0 0 0 0 0 0
49077- 0 0 0 0 0 0 0 0 0 0 0 0
49078- 0 0 0 0 0 0 0 0 0 0 0 0
49079- 0 0 0 0 0 0 0 0 0 0 0 0
49080- 0 0 0 0 0 0 0 0 0 10 10 10
49081- 38 38 38 86 86 86 50 50 50 6 6 6
49082-128 128 128 174 154 114 156 107 11 168 122 10
49083-198 155 10 184 144 12 197 138 11 200 144 11
49084-206 145 10 206 145 10 197 138 11 188 164 115
49085-195 195 195 198 198 198 174 174 174 14 14 14
49086- 2 2 6 22 22 22 116 116 116 116 116 116
49087- 22 22 22 2 2 6 74 74 74 70 70 70
49088- 30 30 30 10 10 10 0 0 0 0 0 0
49089- 0 0 0 0 0 0 0 0 0 0 0 0
49090- 0 0 0 0 0 0 0 0 0 0 0 0
49091- 0 0 0 0 0 0 0 0 0 0 0 0
49092- 0 0 0 0 0 0 0 0 0 0 0 0
49093- 0 0 0 0 0 0 0 0 0 0 0 0
49094- 0 0 0 0 0 0 0 0 0 0 0 0
49095- 0 0 0 0 0 0 0 0 0 0 0 0
49096- 0 0 0 0 0 0 0 0 0 0 0 0
49097- 0 0 0 0 0 0 0 0 0 0 0 0
49098- 0 0 0 0 0 0 0 0 0 0 0 0
49099- 0 0 0 0 0 0 0 0 0 0 0 0
49100- 0 0 0 0 0 0 6 6 6 18 18 18
49101- 50 50 50 101 101 101 26 26 26 10 10 10
49102-138 138 138 190 190 190 174 154 114 156 107 11
49103-197 138 11 200 144 11 197 138 11 192 133 9
49104-180 123 7 190 142 34 190 178 144 187 187 187
49105-202 202 202 221 221 221 214 214 214 66 66 66
49106- 2 2 6 2 2 6 50 50 50 62 62 62
49107- 6 6 6 2 2 6 10 10 10 90 90 90
49108- 50 50 50 18 18 18 6 6 6 0 0 0
49109- 0 0 0 0 0 0 0 0 0 0 0 0
49110- 0 0 0 0 0 0 0 0 0 0 0 0
49111- 0 0 0 0 0 0 0 0 0 0 0 0
49112- 0 0 0 0 0 0 0 0 0 0 0 0
49113- 0 0 0 0 0 0 0 0 0 0 0 0
49114- 0 0 0 0 0 0 0 0 0 0 0 0
49115- 0 0 0 0 0 0 0 0 0 0 0 0
49116- 0 0 0 0 0 0 0 0 0 0 0 0
49117- 0 0 0 0 0 0 0 0 0 0 0 0
49118- 0 0 0 0 0 0 0 0 0 0 0 0
49119- 0 0 0 0 0 0 0 0 0 0 0 0
49120- 0 0 0 0 0 0 10 10 10 34 34 34
49121- 74 74 74 74 74 74 2 2 6 6 6 6
49122-144 144 144 198 198 198 190 190 190 178 166 146
49123-154 121 60 156 107 11 156 107 11 168 124 44
49124-174 154 114 187 187 187 190 190 190 210 210 210
49125-246 246 246 253 253 253 253 253 253 182 182 182
49126- 6 6 6 2 2 6 2 2 6 2 2 6
49127- 2 2 6 2 2 6 2 2 6 62 62 62
49128- 74 74 74 34 34 34 14 14 14 0 0 0
49129- 0 0 0 0 0 0 0 0 0 0 0 0
49130- 0 0 0 0 0 0 0 0 0 0 0 0
49131- 0 0 0 0 0 0 0 0 0 0 0 0
49132- 0 0 0 0 0 0 0 0 0 0 0 0
49133- 0 0 0 0 0 0 0 0 0 0 0 0
49134- 0 0 0 0 0 0 0 0 0 0 0 0
49135- 0 0 0 0 0 0 0 0 0 0 0 0
49136- 0 0 0 0 0 0 0 0 0 0 0 0
49137- 0 0 0 0 0 0 0 0 0 0 0 0
49138- 0 0 0 0 0 0 0 0 0 0 0 0
49139- 0 0 0 0 0 0 0 0 0 0 0 0
49140- 0 0 0 10 10 10 22 22 22 54 54 54
49141- 94 94 94 18 18 18 2 2 6 46 46 46
49142-234 234 234 221 221 221 190 190 190 190 190 190
49143-190 190 190 187 187 187 187 187 187 190 190 190
49144-190 190 190 195 195 195 214 214 214 242 242 242
49145-253 253 253 253 253 253 253 253 253 253 253 253
49146- 82 82 82 2 2 6 2 2 6 2 2 6
49147- 2 2 6 2 2 6 2 2 6 14 14 14
49148- 86 86 86 54 54 54 22 22 22 6 6 6
49149- 0 0 0 0 0 0 0 0 0 0 0 0
49150- 0 0 0 0 0 0 0 0 0 0 0 0
49151- 0 0 0 0 0 0 0 0 0 0 0 0
49152- 0 0 0 0 0 0 0 0 0 0 0 0
49153- 0 0 0 0 0 0 0 0 0 0 0 0
49154- 0 0 0 0 0 0 0 0 0 0 0 0
49155- 0 0 0 0 0 0 0 0 0 0 0 0
49156- 0 0 0 0 0 0 0 0 0 0 0 0
49157- 0 0 0 0 0 0 0 0 0 0 0 0
49158- 0 0 0 0 0 0 0 0 0 0 0 0
49159- 0 0 0 0 0 0 0 0 0 0 0 0
49160- 6 6 6 18 18 18 46 46 46 90 90 90
49161- 46 46 46 18 18 18 6 6 6 182 182 182
49162-253 253 253 246 246 246 206 206 206 190 190 190
49163-190 190 190 190 190 190 190 190 190 190 190 190
49164-206 206 206 231 231 231 250 250 250 253 253 253
49165-253 253 253 253 253 253 253 253 253 253 253 253
49166-202 202 202 14 14 14 2 2 6 2 2 6
49167- 2 2 6 2 2 6 2 2 6 2 2 6
49168- 42 42 42 86 86 86 42 42 42 18 18 18
49169- 6 6 6 0 0 0 0 0 0 0 0 0
49170- 0 0 0 0 0 0 0 0 0 0 0 0
49171- 0 0 0 0 0 0 0 0 0 0 0 0
49172- 0 0 0 0 0 0 0 0 0 0 0 0
49173- 0 0 0 0 0 0 0 0 0 0 0 0
49174- 0 0 0 0 0 0 0 0 0 0 0 0
49175- 0 0 0 0 0 0 0 0 0 0 0 0
49176- 0 0 0 0 0 0 0 0 0 0 0 0
49177- 0 0 0 0 0 0 0 0 0 0 0 0
49178- 0 0 0 0 0 0 0 0 0 0 0 0
49179- 0 0 0 0 0 0 0 0 0 6 6 6
49180- 14 14 14 38 38 38 74 74 74 66 66 66
49181- 2 2 6 6 6 6 90 90 90 250 250 250
49182-253 253 253 253 253 253 238 238 238 198 198 198
49183-190 190 190 190 190 190 195 195 195 221 221 221
49184-246 246 246 253 253 253 253 253 253 253 253 253
49185-253 253 253 253 253 253 253 253 253 253 253 253
49186-253 253 253 82 82 82 2 2 6 2 2 6
49187- 2 2 6 2 2 6 2 2 6 2 2 6
49188- 2 2 6 78 78 78 70 70 70 34 34 34
49189- 14 14 14 6 6 6 0 0 0 0 0 0
49190- 0 0 0 0 0 0 0 0 0 0 0 0
49191- 0 0 0 0 0 0 0 0 0 0 0 0
49192- 0 0 0 0 0 0 0 0 0 0 0 0
49193- 0 0 0 0 0 0 0 0 0 0 0 0
49194- 0 0 0 0 0 0 0 0 0 0 0 0
49195- 0 0 0 0 0 0 0 0 0 0 0 0
49196- 0 0 0 0 0 0 0 0 0 0 0 0
49197- 0 0 0 0 0 0 0 0 0 0 0 0
49198- 0 0 0 0 0 0 0 0 0 0 0 0
49199- 0 0 0 0 0 0 0 0 0 14 14 14
49200- 34 34 34 66 66 66 78 78 78 6 6 6
49201- 2 2 6 18 18 18 218 218 218 253 253 253
49202-253 253 253 253 253 253 253 253 253 246 246 246
49203-226 226 226 231 231 231 246 246 246 253 253 253
49204-253 253 253 253 253 253 253 253 253 253 253 253
49205-253 253 253 253 253 253 253 253 253 253 253 253
49206-253 253 253 178 178 178 2 2 6 2 2 6
49207- 2 2 6 2 2 6 2 2 6 2 2 6
49208- 2 2 6 18 18 18 90 90 90 62 62 62
49209- 30 30 30 10 10 10 0 0 0 0 0 0
49210- 0 0 0 0 0 0 0 0 0 0 0 0
49211- 0 0 0 0 0 0 0 0 0 0 0 0
49212- 0 0 0 0 0 0 0 0 0 0 0 0
49213- 0 0 0 0 0 0 0 0 0 0 0 0
49214- 0 0 0 0 0 0 0 0 0 0 0 0
49215- 0 0 0 0 0 0 0 0 0 0 0 0
49216- 0 0 0 0 0 0 0 0 0 0 0 0
49217- 0 0 0 0 0 0 0 0 0 0 0 0
49218- 0 0 0 0 0 0 0 0 0 0 0 0
49219- 0 0 0 0 0 0 10 10 10 26 26 26
49220- 58 58 58 90 90 90 18 18 18 2 2 6
49221- 2 2 6 110 110 110 253 253 253 253 253 253
49222-253 253 253 253 253 253 253 253 253 253 253 253
49223-250 250 250 253 253 253 253 253 253 253 253 253
49224-253 253 253 253 253 253 253 253 253 253 253 253
49225-253 253 253 253 253 253 253 253 253 253 253 253
49226-253 253 253 231 231 231 18 18 18 2 2 6
49227- 2 2 6 2 2 6 2 2 6 2 2 6
49228- 2 2 6 2 2 6 18 18 18 94 94 94
49229- 54 54 54 26 26 26 10 10 10 0 0 0
49230- 0 0 0 0 0 0 0 0 0 0 0 0
49231- 0 0 0 0 0 0 0 0 0 0 0 0
49232- 0 0 0 0 0 0 0 0 0 0 0 0
49233- 0 0 0 0 0 0 0 0 0 0 0 0
49234- 0 0 0 0 0 0 0 0 0 0 0 0
49235- 0 0 0 0 0 0 0 0 0 0 0 0
49236- 0 0 0 0 0 0 0 0 0 0 0 0
49237- 0 0 0 0 0 0 0 0 0 0 0 0
49238- 0 0 0 0 0 0 0 0 0 0 0 0
49239- 0 0 0 6 6 6 22 22 22 50 50 50
49240- 90 90 90 26 26 26 2 2 6 2 2 6
49241- 14 14 14 195 195 195 250 250 250 253 253 253
49242-253 253 253 253 253 253 253 253 253 253 253 253
49243-253 253 253 253 253 253 253 253 253 253 253 253
49244-253 253 253 253 253 253 253 253 253 253 253 253
49245-253 253 253 253 253 253 253 253 253 253 253 253
49246-250 250 250 242 242 242 54 54 54 2 2 6
49247- 2 2 6 2 2 6 2 2 6 2 2 6
49248- 2 2 6 2 2 6 2 2 6 38 38 38
49249- 86 86 86 50 50 50 22 22 22 6 6 6
49250- 0 0 0 0 0 0 0 0 0 0 0 0
49251- 0 0 0 0 0 0 0 0 0 0 0 0
49252- 0 0 0 0 0 0 0 0 0 0 0 0
49253- 0 0 0 0 0 0 0 0 0 0 0 0
49254- 0 0 0 0 0 0 0 0 0 0 0 0
49255- 0 0 0 0 0 0 0 0 0 0 0 0
49256- 0 0 0 0 0 0 0 0 0 0 0 0
49257- 0 0 0 0 0 0 0 0 0 0 0 0
49258- 0 0 0 0 0 0 0 0 0 0 0 0
49259- 6 6 6 14 14 14 38 38 38 82 82 82
49260- 34 34 34 2 2 6 2 2 6 2 2 6
49261- 42 42 42 195 195 195 246 246 246 253 253 253
49262-253 253 253 253 253 253 253 253 253 250 250 250
49263-242 242 242 242 242 242 250 250 250 253 253 253
49264-253 253 253 253 253 253 253 253 253 253 253 253
49265-253 253 253 250 250 250 246 246 246 238 238 238
49266-226 226 226 231 231 231 101 101 101 6 6 6
49267- 2 2 6 2 2 6 2 2 6 2 2 6
49268- 2 2 6 2 2 6 2 2 6 2 2 6
49269- 38 38 38 82 82 82 42 42 42 14 14 14
49270- 6 6 6 0 0 0 0 0 0 0 0 0
49271- 0 0 0 0 0 0 0 0 0 0 0 0
49272- 0 0 0 0 0 0 0 0 0 0 0 0
49273- 0 0 0 0 0 0 0 0 0 0 0 0
49274- 0 0 0 0 0 0 0 0 0 0 0 0
49275- 0 0 0 0 0 0 0 0 0 0 0 0
49276- 0 0 0 0 0 0 0 0 0 0 0 0
49277- 0 0 0 0 0 0 0 0 0 0 0 0
49278- 0 0 0 0 0 0 0 0 0 0 0 0
49279- 10 10 10 26 26 26 62 62 62 66 66 66
49280- 2 2 6 2 2 6 2 2 6 6 6 6
49281- 70 70 70 170 170 170 206 206 206 234 234 234
49282-246 246 246 250 250 250 250 250 250 238 238 238
49283-226 226 226 231 231 231 238 238 238 250 250 250
49284-250 250 250 250 250 250 246 246 246 231 231 231
49285-214 214 214 206 206 206 202 202 202 202 202 202
49286-198 198 198 202 202 202 182 182 182 18 18 18
49287- 2 2 6 2 2 6 2 2 6 2 2 6
49288- 2 2 6 2 2 6 2 2 6 2 2 6
49289- 2 2 6 62 62 62 66 66 66 30 30 30
49290- 10 10 10 0 0 0 0 0 0 0 0 0
49291- 0 0 0 0 0 0 0 0 0 0 0 0
49292- 0 0 0 0 0 0 0 0 0 0 0 0
49293- 0 0 0 0 0 0 0 0 0 0 0 0
49294- 0 0 0 0 0 0 0 0 0 0 0 0
49295- 0 0 0 0 0 0 0 0 0 0 0 0
49296- 0 0 0 0 0 0 0 0 0 0 0 0
49297- 0 0 0 0 0 0 0 0 0 0 0 0
49298- 0 0 0 0 0 0 0 0 0 0 0 0
49299- 14 14 14 42 42 42 82 82 82 18 18 18
49300- 2 2 6 2 2 6 2 2 6 10 10 10
49301- 94 94 94 182 182 182 218 218 218 242 242 242
49302-250 250 250 253 253 253 253 253 253 250 250 250
49303-234 234 234 253 253 253 253 253 253 253 253 253
49304-253 253 253 253 253 253 253 253 253 246 246 246
49305-238 238 238 226 226 226 210 210 210 202 202 202
49306-195 195 195 195 195 195 210 210 210 158 158 158
49307- 6 6 6 14 14 14 50 50 50 14 14 14
49308- 2 2 6 2 2 6 2 2 6 2 2 6
49309- 2 2 6 6 6 6 86 86 86 46 46 46
49310- 18 18 18 6 6 6 0 0 0 0 0 0
49311- 0 0 0 0 0 0 0 0 0 0 0 0
49312- 0 0 0 0 0 0 0 0 0 0 0 0
49313- 0 0 0 0 0 0 0 0 0 0 0 0
49314- 0 0 0 0 0 0 0 0 0 0 0 0
49315- 0 0 0 0 0 0 0 0 0 0 0 0
49316- 0 0 0 0 0 0 0 0 0 0 0 0
49317- 0 0 0 0 0 0 0 0 0 0 0 0
49318- 0 0 0 0 0 0 0 0 0 6 6 6
49319- 22 22 22 54 54 54 70 70 70 2 2 6
49320- 2 2 6 10 10 10 2 2 6 22 22 22
49321-166 166 166 231 231 231 250 250 250 253 253 253
49322-253 253 253 253 253 253 253 253 253 250 250 250
49323-242 242 242 253 253 253 253 253 253 253 253 253
49324-253 253 253 253 253 253 253 253 253 253 253 253
49325-253 253 253 253 253 253 253 253 253 246 246 246
49326-231 231 231 206 206 206 198 198 198 226 226 226
49327- 94 94 94 2 2 6 6 6 6 38 38 38
49328- 30 30 30 2 2 6 2 2 6 2 2 6
49329- 2 2 6 2 2 6 62 62 62 66 66 66
49330- 26 26 26 10 10 10 0 0 0 0 0 0
49331- 0 0 0 0 0 0 0 0 0 0 0 0
49332- 0 0 0 0 0 0 0 0 0 0 0 0
49333- 0 0 0 0 0 0 0 0 0 0 0 0
49334- 0 0 0 0 0 0 0 0 0 0 0 0
49335- 0 0 0 0 0 0 0 0 0 0 0 0
49336- 0 0 0 0 0 0 0 0 0 0 0 0
49337- 0 0 0 0 0 0 0 0 0 0 0 0
49338- 0 0 0 0 0 0 0 0 0 10 10 10
49339- 30 30 30 74 74 74 50 50 50 2 2 6
49340- 26 26 26 26 26 26 2 2 6 106 106 106
49341-238 238 238 253 253 253 253 253 253 253 253 253
49342-253 253 253 253 253 253 253 253 253 253 253 253
49343-253 253 253 253 253 253 253 253 253 253 253 253
49344-253 253 253 253 253 253 253 253 253 253 253 253
49345-253 253 253 253 253 253 253 253 253 253 253 253
49346-253 253 253 246 246 246 218 218 218 202 202 202
49347-210 210 210 14 14 14 2 2 6 2 2 6
49348- 30 30 30 22 22 22 2 2 6 2 2 6
49349- 2 2 6 2 2 6 18 18 18 86 86 86
49350- 42 42 42 14 14 14 0 0 0 0 0 0
49351- 0 0 0 0 0 0 0 0 0 0 0 0
49352- 0 0 0 0 0 0 0 0 0 0 0 0
49353- 0 0 0 0 0 0 0 0 0 0 0 0
49354- 0 0 0 0 0 0 0 0 0 0 0 0
49355- 0 0 0 0 0 0 0 0 0 0 0 0
49356- 0 0 0 0 0 0 0 0 0 0 0 0
49357- 0 0 0 0 0 0 0 0 0 0 0 0
49358- 0 0 0 0 0 0 0 0 0 14 14 14
49359- 42 42 42 90 90 90 22 22 22 2 2 6
49360- 42 42 42 2 2 6 18 18 18 218 218 218
49361-253 253 253 253 253 253 253 253 253 253 253 253
49362-253 253 253 253 253 253 253 253 253 253 253 253
49363-253 253 253 253 253 253 253 253 253 253 253 253
49364-253 253 253 253 253 253 253 253 253 253 253 253
49365-253 253 253 253 253 253 253 253 253 253 253 253
49366-253 253 253 253 253 253 250 250 250 221 221 221
49367-218 218 218 101 101 101 2 2 6 14 14 14
49368- 18 18 18 38 38 38 10 10 10 2 2 6
49369- 2 2 6 2 2 6 2 2 6 78 78 78
49370- 58 58 58 22 22 22 6 6 6 0 0 0
49371- 0 0 0 0 0 0 0 0 0 0 0 0
49372- 0 0 0 0 0 0 0 0 0 0 0 0
49373- 0 0 0 0 0 0 0 0 0 0 0 0
49374- 0 0 0 0 0 0 0 0 0 0 0 0
49375- 0 0 0 0 0 0 0 0 0 0 0 0
49376- 0 0 0 0 0 0 0 0 0 0 0 0
49377- 0 0 0 0 0 0 0 0 0 0 0 0
49378- 0 0 0 0 0 0 6 6 6 18 18 18
49379- 54 54 54 82 82 82 2 2 6 26 26 26
49380- 22 22 22 2 2 6 123 123 123 253 253 253
49381-253 253 253 253 253 253 253 253 253 253 253 253
49382-253 253 253 253 253 253 253 253 253 253 253 253
49383-253 253 253 253 253 253 253 253 253 253 253 253
49384-253 253 253 253 253 253 253 253 253 253 253 253
49385-253 253 253 253 253 253 253 253 253 253 253 253
49386-253 253 253 253 253 253 253 253 253 250 250 250
49387-238 238 238 198 198 198 6 6 6 38 38 38
49388- 58 58 58 26 26 26 38 38 38 2 2 6
49389- 2 2 6 2 2 6 2 2 6 46 46 46
49390- 78 78 78 30 30 30 10 10 10 0 0 0
49391- 0 0 0 0 0 0 0 0 0 0 0 0
49392- 0 0 0 0 0 0 0 0 0 0 0 0
49393- 0 0 0 0 0 0 0 0 0 0 0 0
49394- 0 0 0 0 0 0 0 0 0 0 0 0
49395- 0 0 0 0 0 0 0 0 0 0 0 0
49396- 0 0 0 0 0 0 0 0 0 0 0 0
49397- 0 0 0 0 0 0 0 0 0 0 0 0
49398- 0 0 0 0 0 0 10 10 10 30 30 30
49399- 74 74 74 58 58 58 2 2 6 42 42 42
49400- 2 2 6 22 22 22 231 231 231 253 253 253
49401-253 253 253 253 253 253 253 253 253 253 253 253
49402-253 253 253 253 253 253 253 253 253 250 250 250
49403-253 253 253 253 253 253 253 253 253 253 253 253
49404-253 253 253 253 253 253 253 253 253 253 253 253
49405-253 253 253 253 253 253 253 253 253 253 253 253
49406-253 253 253 253 253 253 253 253 253 253 253 253
49407-253 253 253 246 246 246 46 46 46 38 38 38
49408- 42 42 42 14 14 14 38 38 38 14 14 14
49409- 2 2 6 2 2 6 2 2 6 6 6 6
49410- 86 86 86 46 46 46 14 14 14 0 0 0
49411- 0 0 0 0 0 0 0 0 0 0 0 0
49412- 0 0 0 0 0 0 0 0 0 0 0 0
49413- 0 0 0 0 0 0 0 0 0 0 0 0
49414- 0 0 0 0 0 0 0 0 0 0 0 0
49415- 0 0 0 0 0 0 0 0 0 0 0 0
49416- 0 0 0 0 0 0 0 0 0 0 0 0
49417- 0 0 0 0 0 0 0 0 0 0 0 0
49418- 0 0 0 6 6 6 14 14 14 42 42 42
49419- 90 90 90 18 18 18 18 18 18 26 26 26
49420- 2 2 6 116 116 116 253 253 253 253 253 253
49421-253 253 253 253 253 253 253 253 253 253 253 253
49422-253 253 253 253 253 253 250 250 250 238 238 238
49423-253 253 253 253 253 253 253 253 253 253 253 253
49424-253 253 253 253 253 253 253 253 253 253 253 253
49425-253 253 253 253 253 253 253 253 253 253 253 253
49426-253 253 253 253 253 253 253 253 253 253 253 253
49427-253 253 253 253 253 253 94 94 94 6 6 6
49428- 2 2 6 2 2 6 10 10 10 34 34 34
49429- 2 2 6 2 2 6 2 2 6 2 2 6
49430- 74 74 74 58 58 58 22 22 22 6 6 6
49431- 0 0 0 0 0 0 0 0 0 0 0 0
49432- 0 0 0 0 0 0 0 0 0 0 0 0
49433- 0 0 0 0 0 0 0 0 0 0 0 0
49434- 0 0 0 0 0 0 0 0 0 0 0 0
49435- 0 0 0 0 0 0 0 0 0 0 0 0
49436- 0 0 0 0 0 0 0 0 0 0 0 0
49437- 0 0 0 0 0 0 0 0 0 0 0 0
49438- 0 0 0 10 10 10 26 26 26 66 66 66
49439- 82 82 82 2 2 6 38 38 38 6 6 6
49440- 14 14 14 210 210 210 253 253 253 253 253 253
49441-253 253 253 253 253 253 253 253 253 253 253 253
49442-253 253 253 253 253 253 246 246 246 242 242 242
49443-253 253 253 253 253 253 253 253 253 253 253 253
49444-253 253 253 253 253 253 253 253 253 253 253 253
49445-253 253 253 253 253 253 253 253 253 253 253 253
49446-253 253 253 253 253 253 253 253 253 253 253 253
49447-253 253 253 253 253 253 144 144 144 2 2 6
49448- 2 2 6 2 2 6 2 2 6 46 46 46
49449- 2 2 6 2 2 6 2 2 6 2 2 6
49450- 42 42 42 74 74 74 30 30 30 10 10 10
49451- 0 0 0 0 0 0 0 0 0 0 0 0
49452- 0 0 0 0 0 0 0 0 0 0 0 0
49453- 0 0 0 0 0 0 0 0 0 0 0 0
49454- 0 0 0 0 0 0 0 0 0 0 0 0
49455- 0 0 0 0 0 0 0 0 0 0 0 0
49456- 0 0 0 0 0 0 0 0 0 0 0 0
49457- 0 0 0 0 0 0 0 0 0 0 0 0
49458- 6 6 6 14 14 14 42 42 42 90 90 90
49459- 26 26 26 6 6 6 42 42 42 2 2 6
49460- 74 74 74 250 250 250 253 253 253 253 253 253
49461-253 253 253 253 253 253 253 253 253 253 253 253
49462-253 253 253 253 253 253 242 242 242 242 242 242
49463-253 253 253 253 253 253 253 253 253 253 253 253
49464-253 253 253 253 253 253 253 253 253 253 253 253
49465-253 253 253 253 253 253 253 253 253 253 253 253
49466-253 253 253 253 253 253 253 253 253 253 253 253
49467-253 253 253 253 253 253 182 182 182 2 2 6
49468- 2 2 6 2 2 6 2 2 6 46 46 46
49469- 2 2 6 2 2 6 2 2 6 2 2 6
49470- 10 10 10 86 86 86 38 38 38 10 10 10
49471- 0 0 0 0 0 0 0 0 0 0 0 0
49472- 0 0 0 0 0 0 0 0 0 0 0 0
49473- 0 0 0 0 0 0 0 0 0 0 0 0
49474- 0 0 0 0 0 0 0 0 0 0 0 0
49475- 0 0 0 0 0 0 0 0 0 0 0 0
49476- 0 0 0 0 0 0 0 0 0 0 0 0
49477- 0 0 0 0 0 0 0 0 0 0 0 0
49478- 10 10 10 26 26 26 66 66 66 82 82 82
49479- 2 2 6 22 22 22 18 18 18 2 2 6
49480-149 149 149 253 253 253 253 253 253 253 253 253
49481-253 253 253 253 253 253 253 253 253 253 253 253
49482-253 253 253 253 253 253 234 234 234 242 242 242
49483-253 253 253 253 253 253 253 253 253 253 253 253
49484-253 253 253 253 253 253 253 253 253 253 253 253
49485-253 253 253 253 253 253 253 253 253 253 253 253
49486-253 253 253 253 253 253 253 253 253 253 253 253
49487-253 253 253 253 253 253 206 206 206 2 2 6
49488- 2 2 6 2 2 6 2 2 6 38 38 38
49489- 2 2 6 2 2 6 2 2 6 2 2 6
49490- 6 6 6 86 86 86 46 46 46 14 14 14
49491- 0 0 0 0 0 0 0 0 0 0 0 0
49492- 0 0 0 0 0 0 0 0 0 0 0 0
49493- 0 0 0 0 0 0 0 0 0 0 0 0
49494- 0 0 0 0 0 0 0 0 0 0 0 0
49495- 0 0 0 0 0 0 0 0 0 0 0 0
49496- 0 0 0 0 0 0 0 0 0 0 0 0
49497- 0 0 0 0 0 0 0 0 0 6 6 6
49498- 18 18 18 46 46 46 86 86 86 18 18 18
49499- 2 2 6 34 34 34 10 10 10 6 6 6
49500-210 210 210 253 253 253 253 253 253 253 253 253
49501-253 253 253 253 253 253 253 253 253 253 253 253
49502-253 253 253 253 253 253 234 234 234 242 242 242
49503-253 253 253 253 253 253 253 253 253 253 253 253
49504-253 253 253 253 253 253 253 253 253 253 253 253
49505-253 253 253 253 253 253 253 253 253 253 253 253
49506-253 253 253 253 253 253 253 253 253 253 253 253
49507-253 253 253 253 253 253 221 221 221 6 6 6
49508- 2 2 6 2 2 6 6 6 6 30 30 30
49509- 2 2 6 2 2 6 2 2 6 2 2 6
49510- 2 2 6 82 82 82 54 54 54 18 18 18
49511- 6 6 6 0 0 0 0 0 0 0 0 0
49512- 0 0 0 0 0 0 0 0 0 0 0 0
49513- 0 0 0 0 0 0 0 0 0 0 0 0
49514- 0 0 0 0 0 0 0 0 0 0 0 0
49515- 0 0 0 0 0 0 0 0 0 0 0 0
49516- 0 0 0 0 0 0 0 0 0 0 0 0
49517- 0 0 0 0 0 0 0 0 0 10 10 10
49518- 26 26 26 66 66 66 62 62 62 2 2 6
49519- 2 2 6 38 38 38 10 10 10 26 26 26
49520-238 238 238 253 253 253 253 253 253 253 253 253
49521-253 253 253 253 253 253 253 253 253 253 253 253
49522-253 253 253 253 253 253 231 231 231 238 238 238
49523-253 253 253 253 253 253 253 253 253 253 253 253
49524-253 253 253 253 253 253 253 253 253 253 253 253
49525-253 253 253 253 253 253 253 253 253 253 253 253
49526-253 253 253 253 253 253 253 253 253 253 253 253
49527-253 253 253 253 253 253 231 231 231 6 6 6
49528- 2 2 6 2 2 6 10 10 10 30 30 30
49529- 2 2 6 2 2 6 2 2 6 2 2 6
49530- 2 2 6 66 66 66 58 58 58 22 22 22
49531- 6 6 6 0 0 0 0 0 0 0 0 0
49532- 0 0 0 0 0 0 0 0 0 0 0 0
49533- 0 0 0 0 0 0 0 0 0 0 0 0
49534- 0 0 0 0 0 0 0 0 0 0 0 0
49535- 0 0 0 0 0 0 0 0 0 0 0 0
49536- 0 0 0 0 0 0 0 0 0 0 0 0
49537- 0 0 0 0 0 0 0 0 0 10 10 10
49538- 38 38 38 78 78 78 6 6 6 2 2 6
49539- 2 2 6 46 46 46 14 14 14 42 42 42
49540-246 246 246 253 253 253 253 253 253 253 253 253
49541-253 253 253 253 253 253 253 253 253 253 253 253
49542-253 253 253 253 253 253 231 231 231 242 242 242
49543-253 253 253 253 253 253 253 253 253 253 253 253
49544-253 253 253 253 253 253 253 253 253 253 253 253
49545-253 253 253 253 253 253 253 253 253 253 253 253
49546-253 253 253 253 253 253 253 253 253 253 253 253
49547-253 253 253 253 253 253 234 234 234 10 10 10
49548- 2 2 6 2 2 6 22 22 22 14 14 14
49549- 2 2 6 2 2 6 2 2 6 2 2 6
49550- 2 2 6 66 66 66 62 62 62 22 22 22
49551- 6 6 6 0 0 0 0 0 0 0 0 0
49552- 0 0 0 0 0 0 0 0 0 0 0 0
49553- 0 0 0 0 0 0 0 0 0 0 0 0
49554- 0 0 0 0 0 0 0 0 0 0 0 0
49555- 0 0 0 0 0 0 0 0 0 0 0 0
49556- 0 0 0 0 0 0 0 0 0 0 0 0
49557- 0 0 0 0 0 0 6 6 6 18 18 18
49558- 50 50 50 74 74 74 2 2 6 2 2 6
49559- 14 14 14 70 70 70 34 34 34 62 62 62
49560-250 250 250 253 253 253 253 253 253 253 253 253
49561-253 253 253 253 253 253 253 253 253 253 253 253
49562-253 253 253 253 253 253 231 231 231 246 246 246
49563-253 253 253 253 253 253 253 253 253 253 253 253
49564-253 253 253 253 253 253 253 253 253 253 253 253
49565-253 253 253 253 253 253 253 253 253 253 253 253
49566-253 253 253 253 253 253 253 253 253 253 253 253
49567-253 253 253 253 253 253 234 234 234 14 14 14
49568- 2 2 6 2 2 6 30 30 30 2 2 6
49569- 2 2 6 2 2 6 2 2 6 2 2 6
49570- 2 2 6 66 66 66 62 62 62 22 22 22
49571- 6 6 6 0 0 0 0 0 0 0 0 0
49572- 0 0 0 0 0 0 0 0 0 0 0 0
49573- 0 0 0 0 0 0 0 0 0 0 0 0
49574- 0 0 0 0 0 0 0 0 0 0 0 0
49575- 0 0 0 0 0 0 0 0 0 0 0 0
49576- 0 0 0 0 0 0 0 0 0 0 0 0
49577- 0 0 0 0 0 0 6 6 6 18 18 18
49578- 54 54 54 62 62 62 2 2 6 2 2 6
49579- 2 2 6 30 30 30 46 46 46 70 70 70
49580-250 250 250 253 253 253 253 253 253 253 253 253
49581-253 253 253 253 253 253 253 253 253 253 253 253
49582-253 253 253 253 253 253 231 231 231 246 246 246
49583-253 253 253 253 253 253 253 253 253 253 253 253
49584-253 253 253 253 253 253 253 253 253 253 253 253
49585-253 253 253 253 253 253 253 253 253 253 253 253
49586-253 253 253 253 253 253 253 253 253 253 253 253
49587-253 253 253 253 253 253 226 226 226 10 10 10
49588- 2 2 6 6 6 6 30 30 30 2 2 6
49589- 2 2 6 2 2 6 2 2 6 2 2 6
49590- 2 2 6 66 66 66 58 58 58 22 22 22
49591- 6 6 6 0 0 0 0 0 0 0 0 0
49592- 0 0 0 0 0 0 0 0 0 0 0 0
49593- 0 0 0 0 0 0 0 0 0 0 0 0
49594- 0 0 0 0 0 0 0 0 0 0 0 0
49595- 0 0 0 0 0 0 0 0 0 0 0 0
49596- 0 0 0 0 0 0 0 0 0 0 0 0
49597- 0 0 0 0 0 0 6 6 6 22 22 22
49598- 58 58 58 62 62 62 2 2 6 2 2 6
49599- 2 2 6 2 2 6 30 30 30 78 78 78
49600-250 250 250 253 253 253 253 253 253 253 253 253
49601-253 253 253 253 253 253 253 253 253 253 253 253
49602-253 253 253 253 253 253 231 231 231 246 246 246
49603-253 253 253 253 253 253 253 253 253 253 253 253
49604-253 253 253 253 253 253 253 253 253 253 253 253
49605-253 253 253 253 253 253 253 253 253 253 253 253
49606-253 253 253 253 253 253 253 253 253 253 253 253
49607-253 253 253 253 253 253 206 206 206 2 2 6
49608- 22 22 22 34 34 34 18 14 6 22 22 22
49609- 26 26 26 18 18 18 6 6 6 2 2 6
49610- 2 2 6 82 82 82 54 54 54 18 18 18
49611- 6 6 6 0 0 0 0 0 0 0 0 0
49612- 0 0 0 0 0 0 0 0 0 0 0 0
49613- 0 0 0 0 0 0 0 0 0 0 0 0
49614- 0 0 0 0 0 0 0 0 0 0 0 0
49615- 0 0 0 0 0 0 0 0 0 0 0 0
49616- 0 0 0 0 0 0 0 0 0 0 0 0
49617- 0 0 0 0 0 0 6 6 6 26 26 26
49618- 62 62 62 106 106 106 74 54 14 185 133 11
49619-210 162 10 121 92 8 6 6 6 62 62 62
49620-238 238 238 253 253 253 253 253 253 253 253 253
49621-253 253 253 253 253 253 253 253 253 253 253 253
49622-253 253 253 253 253 253 231 231 231 246 246 246
49623-253 253 253 253 253 253 253 253 253 253 253 253
49624-253 253 253 253 253 253 253 253 253 253 253 253
49625-253 253 253 253 253 253 253 253 253 253 253 253
49626-253 253 253 253 253 253 253 253 253 253 253 253
49627-253 253 253 253 253 253 158 158 158 18 18 18
49628- 14 14 14 2 2 6 2 2 6 2 2 6
49629- 6 6 6 18 18 18 66 66 66 38 38 38
49630- 6 6 6 94 94 94 50 50 50 18 18 18
49631- 6 6 6 0 0 0 0 0 0 0 0 0
49632- 0 0 0 0 0 0 0 0 0 0 0 0
49633- 0 0 0 0 0 0 0 0 0 0 0 0
49634- 0 0 0 0 0 0 0 0 0 0 0 0
49635- 0 0 0 0 0 0 0 0 0 0 0 0
49636- 0 0 0 0 0 0 0 0 0 6 6 6
49637- 10 10 10 10 10 10 18 18 18 38 38 38
49638- 78 78 78 142 134 106 216 158 10 242 186 14
49639-246 190 14 246 190 14 156 118 10 10 10 10
49640- 90 90 90 238 238 238 253 253 253 253 253 253
49641-253 253 253 253 253 253 253 253 253 253 253 253
49642-253 253 253 253 253 253 231 231 231 250 250 250
49643-253 253 253 253 253 253 253 253 253 253 253 253
49644-253 253 253 253 253 253 253 253 253 253 253 253
49645-253 253 253 253 253 253 253 253 253 253 253 253
49646-253 253 253 253 253 253 253 253 253 246 230 190
49647-238 204 91 238 204 91 181 142 44 37 26 9
49648- 2 2 6 2 2 6 2 2 6 2 2 6
49649- 2 2 6 2 2 6 38 38 38 46 46 46
49650- 26 26 26 106 106 106 54 54 54 18 18 18
49651- 6 6 6 0 0 0 0 0 0 0 0 0
49652- 0 0 0 0 0 0 0 0 0 0 0 0
49653- 0 0 0 0 0 0 0 0 0 0 0 0
49654- 0 0 0 0 0 0 0 0 0 0 0 0
49655- 0 0 0 0 0 0 0 0 0 0 0 0
49656- 0 0 0 6 6 6 14 14 14 22 22 22
49657- 30 30 30 38 38 38 50 50 50 70 70 70
49658-106 106 106 190 142 34 226 170 11 242 186 14
49659-246 190 14 246 190 14 246 190 14 154 114 10
49660- 6 6 6 74 74 74 226 226 226 253 253 253
49661-253 253 253 253 253 253 253 253 253 253 253 253
49662-253 253 253 253 253 253 231 231 231 250 250 250
49663-253 253 253 253 253 253 253 253 253 253 253 253
49664-253 253 253 253 253 253 253 253 253 253 253 253
49665-253 253 253 253 253 253 253 253 253 253 253 253
49666-253 253 253 253 253 253 253 253 253 228 184 62
49667-241 196 14 241 208 19 232 195 16 38 30 10
49668- 2 2 6 2 2 6 2 2 6 2 2 6
49669- 2 2 6 6 6 6 30 30 30 26 26 26
49670-203 166 17 154 142 90 66 66 66 26 26 26
49671- 6 6 6 0 0 0 0 0 0 0 0 0
49672- 0 0 0 0 0 0 0 0 0 0 0 0
49673- 0 0 0 0 0 0 0 0 0 0 0 0
49674- 0 0 0 0 0 0 0 0 0 0 0 0
49675- 0 0 0 0 0 0 0 0 0 0 0 0
49676- 6 6 6 18 18 18 38 38 38 58 58 58
49677- 78 78 78 86 86 86 101 101 101 123 123 123
49678-175 146 61 210 150 10 234 174 13 246 186 14
49679-246 190 14 246 190 14 246 190 14 238 190 10
49680-102 78 10 2 2 6 46 46 46 198 198 198
49681-253 253 253 253 253 253 253 253 253 253 253 253
49682-253 253 253 253 253 253 234 234 234 242 242 242
49683-253 253 253 253 253 253 253 253 253 253 253 253
49684-253 253 253 253 253 253 253 253 253 253 253 253
49685-253 253 253 253 253 253 253 253 253 253 253 253
49686-253 253 253 253 253 253 253 253 253 224 178 62
49687-242 186 14 241 196 14 210 166 10 22 18 6
49688- 2 2 6 2 2 6 2 2 6 2 2 6
49689- 2 2 6 2 2 6 6 6 6 121 92 8
49690-238 202 15 232 195 16 82 82 82 34 34 34
49691- 10 10 10 0 0 0 0 0 0 0 0 0
49692- 0 0 0 0 0 0 0 0 0 0 0 0
49693- 0 0 0 0 0 0 0 0 0 0 0 0
49694- 0 0 0 0 0 0 0 0 0 0 0 0
49695- 0 0 0 0 0 0 0 0 0 0 0 0
49696- 14 14 14 38 38 38 70 70 70 154 122 46
49697-190 142 34 200 144 11 197 138 11 197 138 11
49698-213 154 11 226 170 11 242 186 14 246 190 14
49699-246 190 14 246 190 14 246 190 14 246 190 14
49700-225 175 15 46 32 6 2 2 6 22 22 22
49701-158 158 158 250 250 250 253 253 253 253 253 253
49702-253 253 253 253 253 253 253 253 253 253 253 253
49703-253 253 253 253 253 253 253 253 253 253 253 253
49704-253 253 253 253 253 253 253 253 253 253 253 253
49705-253 253 253 253 253 253 253 253 253 253 253 253
49706-253 253 253 250 250 250 242 242 242 224 178 62
49707-239 182 13 236 186 11 213 154 11 46 32 6
49708- 2 2 6 2 2 6 2 2 6 2 2 6
49709- 2 2 6 2 2 6 61 42 6 225 175 15
49710-238 190 10 236 186 11 112 100 78 42 42 42
49711- 14 14 14 0 0 0 0 0 0 0 0 0
49712- 0 0 0 0 0 0 0 0 0 0 0 0
49713- 0 0 0 0 0 0 0 0 0 0 0 0
49714- 0 0 0 0 0 0 0 0 0 0 0 0
49715- 0 0 0 0 0 0 0 0 0 6 6 6
49716- 22 22 22 54 54 54 154 122 46 213 154 11
49717-226 170 11 230 174 11 226 170 11 226 170 11
49718-236 178 12 242 186 14 246 190 14 246 190 14
49719-246 190 14 246 190 14 246 190 14 246 190 14
49720-241 196 14 184 144 12 10 10 10 2 2 6
49721- 6 6 6 116 116 116 242 242 242 253 253 253
49722-253 253 253 253 253 253 253 253 253 253 253 253
49723-253 253 253 253 253 253 253 253 253 253 253 253
49724-253 253 253 253 253 253 253 253 253 253 253 253
49725-253 253 253 253 253 253 253 253 253 253 253 253
49726-253 253 253 231 231 231 198 198 198 214 170 54
49727-236 178 12 236 178 12 210 150 10 137 92 6
49728- 18 14 6 2 2 6 2 2 6 2 2 6
49729- 6 6 6 70 47 6 200 144 11 236 178 12
49730-239 182 13 239 182 13 124 112 88 58 58 58
49731- 22 22 22 6 6 6 0 0 0 0 0 0
49732- 0 0 0 0 0 0 0 0 0 0 0 0
49733- 0 0 0 0 0 0 0 0 0 0 0 0
49734- 0 0 0 0 0 0 0 0 0 0 0 0
49735- 0 0 0 0 0 0 0 0 0 10 10 10
49736- 30 30 30 70 70 70 180 133 36 226 170 11
49737-239 182 13 242 186 14 242 186 14 246 186 14
49738-246 190 14 246 190 14 246 190 14 246 190 14
49739-246 190 14 246 190 14 246 190 14 246 190 14
49740-246 190 14 232 195 16 98 70 6 2 2 6
49741- 2 2 6 2 2 6 66 66 66 221 221 221
49742-253 253 253 253 253 253 253 253 253 253 253 253
49743-253 253 253 253 253 253 253 253 253 253 253 253
49744-253 253 253 253 253 253 253 253 253 253 253 253
49745-253 253 253 253 253 253 253 253 253 253 253 253
49746-253 253 253 206 206 206 198 198 198 214 166 58
49747-230 174 11 230 174 11 216 158 10 192 133 9
49748-163 110 8 116 81 8 102 78 10 116 81 8
49749-167 114 7 197 138 11 226 170 11 239 182 13
49750-242 186 14 242 186 14 162 146 94 78 78 78
49751- 34 34 34 14 14 14 6 6 6 0 0 0
49752- 0 0 0 0 0 0 0 0 0 0 0 0
49753- 0 0 0 0 0 0 0 0 0 0 0 0
49754- 0 0 0 0 0 0 0 0 0 0 0 0
49755- 0 0 0 0 0 0 0 0 0 6 6 6
49756- 30 30 30 78 78 78 190 142 34 226 170 11
49757-239 182 13 246 190 14 246 190 14 246 190 14
49758-246 190 14 246 190 14 246 190 14 246 190 14
49759-246 190 14 246 190 14 246 190 14 246 190 14
49760-246 190 14 241 196 14 203 166 17 22 18 6
49761- 2 2 6 2 2 6 2 2 6 38 38 38
49762-218 218 218 253 253 253 253 253 253 253 253 253
49763-253 253 253 253 253 253 253 253 253 253 253 253
49764-253 253 253 253 253 253 253 253 253 253 253 253
49765-253 253 253 253 253 253 253 253 253 253 253 253
49766-250 250 250 206 206 206 198 198 198 202 162 69
49767-226 170 11 236 178 12 224 166 10 210 150 10
49768-200 144 11 197 138 11 192 133 9 197 138 11
49769-210 150 10 226 170 11 242 186 14 246 190 14
49770-246 190 14 246 186 14 225 175 15 124 112 88
49771- 62 62 62 30 30 30 14 14 14 6 6 6
49772- 0 0 0 0 0 0 0 0 0 0 0 0
49773- 0 0 0 0 0 0 0 0 0 0 0 0
49774- 0 0 0 0 0 0 0 0 0 0 0 0
49775- 0 0 0 0 0 0 0 0 0 10 10 10
49776- 30 30 30 78 78 78 174 135 50 224 166 10
49777-239 182 13 246 190 14 246 190 14 246 190 14
49778-246 190 14 246 190 14 246 190 14 246 190 14
49779-246 190 14 246 190 14 246 190 14 246 190 14
49780-246 190 14 246 190 14 241 196 14 139 102 15
49781- 2 2 6 2 2 6 2 2 6 2 2 6
49782- 78 78 78 250 250 250 253 253 253 253 253 253
49783-253 253 253 253 253 253 253 253 253 253 253 253
49784-253 253 253 253 253 253 253 253 253 253 253 253
49785-253 253 253 253 253 253 253 253 253 253 253 253
49786-250 250 250 214 214 214 198 198 198 190 150 46
49787-219 162 10 236 178 12 234 174 13 224 166 10
49788-216 158 10 213 154 11 213 154 11 216 158 10
49789-226 170 11 239 182 13 246 190 14 246 190 14
49790-246 190 14 246 190 14 242 186 14 206 162 42
49791-101 101 101 58 58 58 30 30 30 14 14 14
49792- 6 6 6 0 0 0 0 0 0 0 0 0
49793- 0 0 0 0 0 0 0 0 0 0 0 0
49794- 0 0 0 0 0 0 0 0 0 0 0 0
49795- 0 0 0 0 0 0 0 0 0 10 10 10
49796- 30 30 30 74 74 74 174 135 50 216 158 10
49797-236 178 12 246 190 14 246 190 14 246 190 14
49798-246 190 14 246 190 14 246 190 14 246 190 14
49799-246 190 14 246 190 14 246 190 14 246 190 14
49800-246 190 14 246 190 14 241 196 14 226 184 13
49801- 61 42 6 2 2 6 2 2 6 2 2 6
49802- 22 22 22 238 238 238 253 253 253 253 253 253
49803-253 253 253 253 253 253 253 253 253 253 253 253
49804-253 253 253 253 253 253 253 253 253 253 253 253
49805-253 253 253 253 253 253 253 253 253 253 253 253
49806-253 253 253 226 226 226 187 187 187 180 133 36
49807-216 158 10 236 178 12 239 182 13 236 178 12
49808-230 174 11 226 170 11 226 170 11 230 174 11
49809-236 178 12 242 186 14 246 190 14 246 190 14
49810-246 190 14 246 190 14 246 186 14 239 182 13
49811-206 162 42 106 106 106 66 66 66 34 34 34
49812- 14 14 14 6 6 6 0 0 0 0 0 0
49813- 0 0 0 0 0 0 0 0 0 0 0 0
49814- 0 0 0 0 0 0 0 0 0 0 0 0
49815- 0 0 0 0 0 0 0 0 0 6 6 6
49816- 26 26 26 70 70 70 163 133 67 213 154 11
49817-236 178 12 246 190 14 246 190 14 246 190 14
49818-246 190 14 246 190 14 246 190 14 246 190 14
49819-246 190 14 246 190 14 246 190 14 246 190 14
49820-246 190 14 246 190 14 246 190 14 241 196 14
49821-190 146 13 18 14 6 2 2 6 2 2 6
49822- 46 46 46 246 246 246 253 253 253 253 253 253
49823-253 253 253 253 253 253 253 253 253 253 253 253
49824-253 253 253 253 253 253 253 253 253 253 253 253
49825-253 253 253 253 253 253 253 253 253 253 253 253
49826-253 253 253 221 221 221 86 86 86 156 107 11
49827-216 158 10 236 178 12 242 186 14 246 186 14
49828-242 186 14 239 182 13 239 182 13 242 186 14
49829-242 186 14 246 186 14 246 190 14 246 190 14
49830-246 190 14 246 190 14 246 190 14 246 190 14
49831-242 186 14 225 175 15 142 122 72 66 66 66
49832- 30 30 30 10 10 10 0 0 0 0 0 0
49833- 0 0 0 0 0 0 0 0 0 0 0 0
49834- 0 0 0 0 0 0 0 0 0 0 0 0
49835- 0 0 0 0 0 0 0 0 0 6 6 6
49836- 26 26 26 70 70 70 163 133 67 210 150 10
49837-236 178 12 246 190 14 246 190 14 246 190 14
49838-246 190 14 246 190 14 246 190 14 246 190 14
49839-246 190 14 246 190 14 246 190 14 246 190 14
49840-246 190 14 246 190 14 246 190 14 246 190 14
49841-232 195 16 121 92 8 34 34 34 106 106 106
49842-221 221 221 253 253 253 253 253 253 253 253 253
49843-253 253 253 253 253 253 253 253 253 253 253 253
49844-253 253 253 253 253 253 253 253 253 253 253 253
49845-253 253 253 253 253 253 253 253 253 253 253 253
49846-242 242 242 82 82 82 18 14 6 163 110 8
49847-216 158 10 236 178 12 242 186 14 246 190 14
49848-246 190 14 246 190 14 246 190 14 246 190 14
49849-246 190 14 246 190 14 246 190 14 246 190 14
49850-246 190 14 246 190 14 246 190 14 246 190 14
49851-246 190 14 246 190 14 242 186 14 163 133 67
49852- 46 46 46 18 18 18 6 6 6 0 0 0
49853- 0 0 0 0 0 0 0 0 0 0 0 0
49854- 0 0 0 0 0 0 0 0 0 0 0 0
49855- 0 0 0 0 0 0 0 0 0 10 10 10
49856- 30 30 30 78 78 78 163 133 67 210 150 10
49857-236 178 12 246 186 14 246 190 14 246 190 14
49858-246 190 14 246 190 14 246 190 14 246 190 14
49859-246 190 14 246 190 14 246 190 14 246 190 14
49860-246 190 14 246 190 14 246 190 14 246 190 14
49861-241 196 14 215 174 15 190 178 144 253 253 253
49862-253 253 253 253 253 253 253 253 253 253 253 253
49863-253 253 253 253 253 253 253 253 253 253 253 253
49864-253 253 253 253 253 253 253 253 253 253 253 253
49865-253 253 253 253 253 253 253 253 253 218 218 218
49866- 58 58 58 2 2 6 22 18 6 167 114 7
49867-216 158 10 236 178 12 246 186 14 246 190 14
49868-246 190 14 246 190 14 246 190 14 246 190 14
49869-246 190 14 246 190 14 246 190 14 246 190 14
49870-246 190 14 246 190 14 246 190 14 246 190 14
49871-246 190 14 246 186 14 242 186 14 190 150 46
49872- 54 54 54 22 22 22 6 6 6 0 0 0
49873- 0 0 0 0 0 0 0 0 0 0 0 0
49874- 0 0 0 0 0 0 0 0 0 0 0 0
49875- 0 0 0 0 0 0 0 0 0 14 14 14
49876- 38 38 38 86 86 86 180 133 36 213 154 11
49877-236 178 12 246 186 14 246 190 14 246 190 14
49878-246 190 14 246 190 14 246 190 14 246 190 14
49879-246 190 14 246 190 14 246 190 14 246 190 14
49880-246 190 14 246 190 14 246 190 14 246 190 14
49881-246 190 14 232 195 16 190 146 13 214 214 214
49882-253 253 253 253 253 253 253 253 253 253 253 253
49883-253 253 253 253 253 253 253 253 253 253 253 253
49884-253 253 253 253 253 253 253 253 253 253 253 253
49885-253 253 253 250 250 250 170 170 170 26 26 26
49886- 2 2 6 2 2 6 37 26 9 163 110 8
49887-219 162 10 239 182 13 246 186 14 246 190 14
49888-246 190 14 246 190 14 246 190 14 246 190 14
49889-246 190 14 246 190 14 246 190 14 246 190 14
49890-246 190 14 246 190 14 246 190 14 246 190 14
49891-246 186 14 236 178 12 224 166 10 142 122 72
49892- 46 46 46 18 18 18 6 6 6 0 0 0
49893- 0 0 0 0 0 0 0 0 0 0 0 0
49894- 0 0 0 0 0 0 0 0 0 0 0 0
49895- 0 0 0 0 0 0 6 6 6 18 18 18
49896- 50 50 50 109 106 95 192 133 9 224 166 10
49897-242 186 14 246 190 14 246 190 14 246 190 14
49898-246 190 14 246 190 14 246 190 14 246 190 14
49899-246 190 14 246 190 14 246 190 14 246 190 14
49900-246 190 14 246 190 14 246 190 14 246 190 14
49901-242 186 14 226 184 13 210 162 10 142 110 46
49902-226 226 226 253 253 253 253 253 253 253 253 253
49903-253 253 253 253 253 253 253 253 253 253 253 253
49904-253 253 253 253 253 253 253 253 253 253 253 253
49905-198 198 198 66 66 66 2 2 6 2 2 6
49906- 2 2 6 2 2 6 50 34 6 156 107 11
49907-219 162 10 239 182 13 246 186 14 246 190 14
49908-246 190 14 246 190 14 246 190 14 246 190 14
49909-246 190 14 246 190 14 246 190 14 246 190 14
49910-246 190 14 246 190 14 246 190 14 242 186 14
49911-234 174 13 213 154 11 154 122 46 66 66 66
49912- 30 30 30 10 10 10 0 0 0 0 0 0
49913- 0 0 0 0 0 0 0 0 0 0 0 0
49914- 0 0 0 0 0 0 0 0 0 0 0 0
49915- 0 0 0 0 0 0 6 6 6 22 22 22
49916- 58 58 58 154 121 60 206 145 10 234 174 13
49917-242 186 14 246 186 14 246 190 14 246 190 14
49918-246 190 14 246 190 14 246 190 14 246 190 14
49919-246 190 14 246 190 14 246 190 14 246 190 14
49920-246 190 14 246 190 14 246 190 14 246 190 14
49921-246 186 14 236 178 12 210 162 10 163 110 8
49922- 61 42 6 138 138 138 218 218 218 250 250 250
49923-253 253 253 253 253 253 253 253 253 250 250 250
49924-242 242 242 210 210 210 144 144 144 66 66 66
49925- 6 6 6 2 2 6 2 2 6 2 2 6
49926- 2 2 6 2 2 6 61 42 6 163 110 8
49927-216 158 10 236 178 12 246 190 14 246 190 14
49928-246 190 14 246 190 14 246 190 14 246 190 14
49929-246 190 14 246 190 14 246 190 14 246 190 14
49930-246 190 14 239 182 13 230 174 11 216 158 10
49931-190 142 34 124 112 88 70 70 70 38 38 38
49932- 18 18 18 6 6 6 0 0 0 0 0 0
49933- 0 0 0 0 0 0 0 0 0 0 0 0
49934- 0 0 0 0 0 0 0 0 0 0 0 0
49935- 0 0 0 0 0 0 6 6 6 22 22 22
49936- 62 62 62 168 124 44 206 145 10 224 166 10
49937-236 178 12 239 182 13 242 186 14 242 186 14
49938-246 186 14 246 190 14 246 190 14 246 190 14
49939-246 190 14 246 190 14 246 190 14 246 190 14
49940-246 190 14 246 190 14 246 190 14 246 190 14
49941-246 190 14 236 178 12 216 158 10 175 118 6
49942- 80 54 7 2 2 6 6 6 6 30 30 30
49943- 54 54 54 62 62 62 50 50 50 38 38 38
49944- 14 14 14 2 2 6 2 2 6 2 2 6
49945- 2 2 6 2 2 6 2 2 6 2 2 6
49946- 2 2 6 6 6 6 80 54 7 167 114 7
49947-213 154 11 236 178 12 246 190 14 246 190 14
49948-246 190 14 246 190 14 246 190 14 246 190 14
49949-246 190 14 242 186 14 239 182 13 239 182 13
49950-230 174 11 210 150 10 174 135 50 124 112 88
49951- 82 82 82 54 54 54 34 34 34 18 18 18
49952- 6 6 6 0 0 0 0 0 0 0 0 0
49953- 0 0 0 0 0 0 0 0 0 0 0 0
49954- 0 0 0 0 0 0 0 0 0 0 0 0
49955- 0 0 0 0 0 0 6 6 6 18 18 18
49956- 50 50 50 158 118 36 192 133 9 200 144 11
49957-216 158 10 219 162 10 224 166 10 226 170 11
49958-230 174 11 236 178 12 239 182 13 239 182 13
49959-242 186 14 246 186 14 246 190 14 246 190 14
49960-246 190 14 246 190 14 246 190 14 246 190 14
49961-246 186 14 230 174 11 210 150 10 163 110 8
49962-104 69 6 10 10 10 2 2 6 2 2 6
49963- 2 2 6 2 2 6 2 2 6 2 2 6
49964- 2 2 6 2 2 6 2 2 6 2 2 6
49965- 2 2 6 2 2 6 2 2 6 2 2 6
49966- 2 2 6 6 6 6 91 60 6 167 114 7
49967-206 145 10 230 174 11 242 186 14 246 190 14
49968-246 190 14 246 190 14 246 186 14 242 186 14
49969-239 182 13 230 174 11 224 166 10 213 154 11
49970-180 133 36 124 112 88 86 86 86 58 58 58
49971- 38 38 38 22 22 22 10 10 10 6 6 6
49972- 0 0 0 0 0 0 0 0 0 0 0 0
49973- 0 0 0 0 0 0 0 0 0 0 0 0
49974- 0 0 0 0 0 0 0 0 0 0 0 0
49975- 0 0 0 0 0 0 0 0 0 14 14 14
49976- 34 34 34 70 70 70 138 110 50 158 118 36
49977-167 114 7 180 123 7 192 133 9 197 138 11
49978-200 144 11 206 145 10 213 154 11 219 162 10
49979-224 166 10 230 174 11 239 182 13 242 186 14
49980-246 186 14 246 186 14 246 186 14 246 186 14
49981-239 182 13 216 158 10 185 133 11 152 99 6
49982-104 69 6 18 14 6 2 2 6 2 2 6
49983- 2 2 6 2 2 6 2 2 6 2 2 6
49984- 2 2 6 2 2 6 2 2 6 2 2 6
49985- 2 2 6 2 2 6 2 2 6 2 2 6
49986- 2 2 6 6 6 6 80 54 7 152 99 6
49987-192 133 9 219 162 10 236 178 12 239 182 13
49988-246 186 14 242 186 14 239 182 13 236 178 12
49989-224 166 10 206 145 10 192 133 9 154 121 60
49990- 94 94 94 62 62 62 42 42 42 22 22 22
49991- 14 14 14 6 6 6 0 0 0 0 0 0
49992- 0 0 0 0 0 0 0 0 0 0 0 0
49993- 0 0 0 0 0 0 0 0 0 0 0 0
49994- 0 0 0 0 0 0 0 0 0 0 0 0
49995- 0 0 0 0 0 0 0 0 0 6 6 6
49996- 18 18 18 34 34 34 58 58 58 78 78 78
49997-101 98 89 124 112 88 142 110 46 156 107 11
49998-163 110 8 167 114 7 175 118 6 180 123 7
49999-185 133 11 197 138 11 210 150 10 219 162 10
50000-226 170 11 236 178 12 236 178 12 234 174 13
50001-219 162 10 197 138 11 163 110 8 130 83 6
50002- 91 60 6 10 10 10 2 2 6 2 2 6
50003- 18 18 18 38 38 38 38 38 38 38 38 38
50004- 38 38 38 38 38 38 38 38 38 38 38 38
50005- 38 38 38 38 38 38 26 26 26 2 2 6
50006- 2 2 6 6 6 6 70 47 6 137 92 6
50007-175 118 6 200 144 11 219 162 10 230 174 11
50008-234 174 13 230 174 11 219 162 10 210 150 10
50009-192 133 9 163 110 8 124 112 88 82 82 82
50010- 50 50 50 30 30 30 14 14 14 6 6 6
50011- 0 0 0 0 0 0 0 0 0 0 0 0
50012- 0 0 0 0 0 0 0 0 0 0 0 0
50013- 0 0 0 0 0 0 0 0 0 0 0 0
50014- 0 0 0 0 0 0 0 0 0 0 0 0
50015- 0 0 0 0 0 0 0 0 0 0 0 0
50016- 6 6 6 14 14 14 22 22 22 34 34 34
50017- 42 42 42 58 58 58 74 74 74 86 86 86
50018-101 98 89 122 102 70 130 98 46 121 87 25
50019-137 92 6 152 99 6 163 110 8 180 123 7
50020-185 133 11 197 138 11 206 145 10 200 144 11
50021-180 123 7 156 107 11 130 83 6 104 69 6
50022- 50 34 6 54 54 54 110 110 110 101 98 89
50023- 86 86 86 82 82 82 78 78 78 78 78 78
50024- 78 78 78 78 78 78 78 78 78 78 78 78
50025- 78 78 78 82 82 82 86 86 86 94 94 94
50026-106 106 106 101 101 101 86 66 34 124 80 6
50027-156 107 11 180 123 7 192 133 9 200 144 11
50028-206 145 10 200 144 11 192 133 9 175 118 6
50029-139 102 15 109 106 95 70 70 70 42 42 42
50030- 22 22 22 10 10 10 0 0 0 0 0 0
50031- 0 0 0 0 0 0 0 0 0 0 0 0
50032- 0 0 0 0 0 0 0 0 0 0 0 0
50033- 0 0 0 0 0 0 0 0 0 0 0 0
50034- 0 0 0 0 0 0 0 0 0 0 0 0
50035- 0 0 0 0 0 0 0 0 0 0 0 0
50036- 0 0 0 0 0 0 6 6 6 10 10 10
50037- 14 14 14 22 22 22 30 30 30 38 38 38
50038- 50 50 50 62 62 62 74 74 74 90 90 90
50039-101 98 89 112 100 78 121 87 25 124 80 6
50040-137 92 6 152 99 6 152 99 6 152 99 6
50041-138 86 6 124 80 6 98 70 6 86 66 30
50042-101 98 89 82 82 82 58 58 58 46 46 46
50043- 38 38 38 34 34 34 34 34 34 34 34 34
50044- 34 34 34 34 34 34 34 34 34 34 34 34
50045- 34 34 34 34 34 34 38 38 38 42 42 42
50046- 54 54 54 82 82 82 94 86 76 91 60 6
50047-134 86 6 156 107 11 167 114 7 175 118 6
50048-175 118 6 167 114 7 152 99 6 121 87 25
50049-101 98 89 62 62 62 34 34 34 18 18 18
50050- 6 6 6 0 0 0 0 0 0 0 0 0
50051- 0 0 0 0 0 0 0 0 0 0 0 0
50052- 0 0 0 0 0 0 0 0 0 0 0 0
50053- 0 0 0 0 0 0 0 0 0 0 0 0
50054- 0 0 0 0 0 0 0 0 0 0 0 0
50055- 0 0 0 0 0 0 0 0 0 0 0 0
50056- 0 0 0 0 0 0 0 0 0 0 0 0
50057- 0 0 0 6 6 6 6 6 6 10 10 10
50058- 18 18 18 22 22 22 30 30 30 42 42 42
50059- 50 50 50 66 66 66 86 86 86 101 98 89
50060-106 86 58 98 70 6 104 69 6 104 69 6
50061-104 69 6 91 60 6 82 62 34 90 90 90
50062- 62 62 62 38 38 38 22 22 22 14 14 14
50063- 10 10 10 10 10 10 10 10 10 10 10 10
50064- 10 10 10 10 10 10 6 6 6 10 10 10
50065- 10 10 10 10 10 10 10 10 10 14 14 14
50066- 22 22 22 42 42 42 70 70 70 89 81 66
50067- 80 54 7 104 69 6 124 80 6 137 92 6
50068-134 86 6 116 81 8 100 82 52 86 86 86
50069- 58 58 58 30 30 30 14 14 14 6 6 6
50070- 0 0 0 0 0 0 0 0 0 0 0 0
50071- 0 0 0 0 0 0 0 0 0 0 0 0
50072- 0 0 0 0 0 0 0 0 0 0 0 0
50073- 0 0 0 0 0 0 0 0 0 0 0 0
50074- 0 0 0 0 0 0 0 0 0 0 0 0
50075- 0 0 0 0 0 0 0 0 0 0 0 0
50076- 0 0 0 0 0 0 0 0 0 0 0 0
50077- 0 0 0 0 0 0 0 0 0 0 0 0
50078- 0 0 0 6 6 6 10 10 10 14 14 14
50079- 18 18 18 26 26 26 38 38 38 54 54 54
50080- 70 70 70 86 86 86 94 86 76 89 81 66
50081- 89 81 66 86 86 86 74 74 74 50 50 50
50082- 30 30 30 14 14 14 6 6 6 0 0 0
50083- 0 0 0 0 0 0 0 0 0 0 0 0
50084- 0 0 0 0 0 0 0 0 0 0 0 0
50085- 0 0 0 0 0 0 0 0 0 0 0 0
50086- 6 6 6 18 18 18 34 34 34 58 58 58
50087- 82 82 82 89 81 66 89 81 66 89 81 66
50088- 94 86 66 94 86 76 74 74 74 50 50 50
50089- 26 26 26 14 14 14 6 6 6 0 0 0
50090- 0 0 0 0 0 0 0 0 0 0 0 0
50091- 0 0 0 0 0 0 0 0 0 0 0 0
50092- 0 0 0 0 0 0 0 0 0 0 0 0
50093- 0 0 0 0 0 0 0 0 0 0 0 0
50094- 0 0 0 0 0 0 0 0 0 0 0 0
50095- 0 0 0 0 0 0 0 0 0 0 0 0
50096- 0 0 0 0 0 0 0 0 0 0 0 0
50097- 0 0 0 0 0 0 0 0 0 0 0 0
50098- 0 0 0 0 0 0 0 0 0 0 0 0
50099- 6 6 6 6 6 6 14 14 14 18 18 18
50100- 30 30 30 38 38 38 46 46 46 54 54 54
50101- 50 50 50 42 42 42 30 30 30 18 18 18
50102- 10 10 10 0 0 0 0 0 0 0 0 0
50103- 0 0 0 0 0 0 0 0 0 0 0 0
50104- 0 0 0 0 0 0 0 0 0 0 0 0
50105- 0 0 0 0 0 0 0 0 0 0 0 0
50106- 0 0 0 6 6 6 14 14 14 26 26 26
50107- 38 38 38 50 50 50 58 58 58 58 58 58
50108- 54 54 54 42 42 42 30 30 30 18 18 18
50109- 10 10 10 0 0 0 0 0 0 0 0 0
50110- 0 0 0 0 0 0 0 0 0 0 0 0
50111- 0 0 0 0 0 0 0 0 0 0 0 0
50112- 0 0 0 0 0 0 0 0 0 0 0 0
50113- 0 0 0 0 0 0 0 0 0 0 0 0
50114- 0 0 0 0 0 0 0 0 0 0 0 0
50115- 0 0 0 0 0 0 0 0 0 0 0 0
50116- 0 0 0 0 0 0 0 0 0 0 0 0
50117- 0 0 0 0 0 0 0 0 0 0 0 0
50118- 0 0 0 0 0 0 0 0 0 0 0 0
50119- 0 0 0 0 0 0 0 0 0 6 6 6
50120- 6 6 6 10 10 10 14 14 14 18 18 18
50121- 18 18 18 14 14 14 10 10 10 6 6 6
50122- 0 0 0 0 0 0 0 0 0 0 0 0
50123- 0 0 0 0 0 0 0 0 0 0 0 0
50124- 0 0 0 0 0 0 0 0 0 0 0 0
50125- 0 0 0 0 0 0 0 0 0 0 0 0
50126- 0 0 0 0 0 0 0 0 0 6 6 6
50127- 14 14 14 18 18 18 22 22 22 22 22 22
50128- 18 18 18 14 14 14 10 10 10 6 6 6
50129- 0 0 0 0 0 0 0 0 0 0 0 0
50130- 0 0 0 0 0 0 0 0 0 0 0 0
50131- 0 0 0 0 0 0 0 0 0 0 0 0
50132- 0 0 0 0 0 0 0 0 0 0 0 0
50133- 0 0 0 0 0 0 0 0 0 0 0 0
50134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50147+4 4 4 4 4 4
50148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50161+4 4 4 4 4 4
50162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50175+4 4 4 4 4 4
50176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50189+4 4 4 4 4 4
50190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50203+4 4 4 4 4 4
50204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50217+4 4 4 4 4 4
50218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50222+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
50223+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
50224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50227+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
50228+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50229+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
50230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50231+4 4 4 4 4 4
50232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50236+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
50237+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
50238+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50241+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
50242+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
50243+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
50244+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50245+4 4 4 4 4 4
50246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50250+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
50251+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
50252+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50255+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
50256+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
50257+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
50258+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
50259+4 4 4 4 4 4
50260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50263+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
50264+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
50265+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
50266+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
50267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50268+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
50269+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
50270+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
50271+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
50272+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
50273+4 4 4 4 4 4
50274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50277+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
50278+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
50279+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
50280+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
50281+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
50282+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
50283+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
50284+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
50285+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
50286+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
50287+4 4 4 4 4 4
50288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
50291+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
50292+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
50293+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
50294+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
50295+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
50296+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
50297+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
50298+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
50299+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
50300+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
50301+4 4 4 4 4 4
50302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50304+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
50305+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
50306+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
50307+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
50308+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
50309+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
50310+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
50311+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
50312+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
50313+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
50314+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
50315+4 4 4 4 4 4
50316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50318+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
50319+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
50320+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
50321+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
50322+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
50323+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
50324+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
50325+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
50326+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
50327+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
50328+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
50329+4 4 4 4 4 4
50330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50332+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
50333+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
50334+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
50335+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
50336+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
50337+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
50338+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
50339+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
50340+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
50341+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
50342+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50343+4 4 4 4 4 4
50344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50346+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
50347+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
50348+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
50349+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
50350+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
50351+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
50352+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
50353+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
50354+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
50355+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
50356+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
50357+4 4 4 4 4 4
50358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50359+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
50360+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
50361+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
50362+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
50363+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
50364+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
50365+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
50366+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
50367+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
50368+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
50369+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
50370+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
50371+4 4 4 4 4 4
50372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50373+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
50374+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
50375+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
50376+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
50377+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
50378+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
50379+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
50380+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
50381+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
50382+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
50383+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
50384+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
50385+0 0 0 4 4 4
50386+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
50387+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
50388+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
50389+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
50390+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
50391+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
50392+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
50393+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
50394+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
50395+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
50396+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
50397+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
50398+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
50399+2 0 0 0 0 0
50400+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
50401+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
50402+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
50403+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
50404+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
50405+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
50406+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
50407+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
50408+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
50409+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
50410+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
50411+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
50412+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
50413+37 38 37 0 0 0
50414+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
50415+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
50416+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
50417+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
50418+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
50419+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
50420+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
50421+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
50422+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
50423+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
50424+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
50425+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
50426+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
50427+85 115 134 4 0 0
50428+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
50429+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
50430+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
50431+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
50432+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
50433+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
50434+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
50435+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
50436+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
50437+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
50438+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
50439+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
50440+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
50441+60 73 81 4 0 0
50442+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
50443+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
50444+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
50445+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
50446+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
50447+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
50448+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
50449+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
50450+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
50451+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
50452+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
50453+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
50454+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
50455+16 19 21 4 0 0
50456+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
50457+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
50458+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
50459+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
50460+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
50461+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
50462+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
50463+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
50464+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
50465+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
50466+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
50467+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
50468+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
50469+4 0 0 4 3 3
50470+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
50471+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
50472+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
50473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
50474+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
50475+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
50476+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
50477+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
50478+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
50479+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
50480+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
50481+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
50482+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
50483+3 2 2 4 4 4
50484+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
50485+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
50486+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
50487+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
50488+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
50489+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
50490+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
50491+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
50492+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
50493+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
50494+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
50495+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
50496+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
50497+4 4 4 4 4 4
50498+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
50499+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
50500+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
50501+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
50502+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
50503+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
50504+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
50505+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
50506+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
50507+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
50508+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
50509+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
50510+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
50511+4 4 4 4 4 4
50512+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
50513+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
50514+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
50515+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
50516+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
50517+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
50518+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
50519+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
50520+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
50521+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
50522+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
50523+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
50524+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
50525+5 5 5 5 5 5
50526+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
50527+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
50528+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
50529+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
50530+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
50531+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50532+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
50533+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
50534+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
50535+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
50536+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
50537+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
50538+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
50539+5 5 5 4 4 4
50540+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
50541+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
50542+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
50543+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
50544+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50545+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
50546+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
50547+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
50548+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
50549+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
50550+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
50551+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
50552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50553+4 4 4 4 4 4
50554+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
50555+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
50556+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
50557+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
50558+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
50559+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50560+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50561+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
50562+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
50563+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
50564+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
50565+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
50566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50567+4 4 4 4 4 4
50568+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
50569+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
50570+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
50571+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
50572+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50573+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
50574+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
50575+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
50576+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
50577+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
50578+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
50579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50581+4 4 4 4 4 4
50582+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
50583+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
50584+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
50585+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
50586+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50587+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50588+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50589+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
50590+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
50591+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
50592+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
50593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50595+4 4 4 4 4 4
50596+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
50597+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
50598+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
50599+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
50600+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50601+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
50602+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
50603+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
50604+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
50605+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
50606+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50609+4 4 4 4 4 4
50610+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
50611+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
50612+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
50613+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
50614+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50615+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
50616+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
50617+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
50618+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
50619+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
50620+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
50621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50623+4 4 4 4 4 4
50624+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
50625+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
50626+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
50627+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
50628+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50629+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
50630+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
50631+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
50632+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
50633+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
50634+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
50635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50637+4 4 4 4 4 4
50638+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
50639+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
50640+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
50641+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
50642+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
50643+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
50644+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
50645+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
50646+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
50647+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
50648+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50651+4 4 4 4 4 4
50652+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
50653+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
50654+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
50655+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
50656+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50657+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
50658+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
50659+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
50660+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
50661+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
50662+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50665+4 4 4 4 4 4
50666+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
50667+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
50668+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
50669+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
50670+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50671+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
50672+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
50673+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
50674+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
50675+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
50676+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50679+4 4 4 4 4 4
50680+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
50681+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
50682+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
50683+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
50684+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50685+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
50686+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
50687+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
50688+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
50689+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50690+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50693+4 4 4 4 4 4
50694+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
50695+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
50696+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
50697+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
50698+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
50699+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
50700+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
50701+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
50702+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50703+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50704+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50707+4 4 4 4 4 4
50708+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
50709+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
50710+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
50711+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
50712+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50713+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
50714+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
50715+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
50716+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50717+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50718+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50721+4 4 4 4 4 4
50722+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
50723+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
50724+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
50725+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
50726+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
50727+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
50728+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
50729+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
50730+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50731+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50732+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50735+4 4 4 4 4 4
50736+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
50737+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
50738+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50739+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
50740+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
50741+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
50742+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
50743+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
50744+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
50745+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50746+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50749+4 4 4 4 4 4
50750+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
50751+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
50752+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
50753+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
50754+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
50755+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
50756+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
50757+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
50758+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50759+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50760+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50763+4 4 4 4 4 4
50764+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
50765+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
50766+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50767+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
50768+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
50769+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
50770+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
50771+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
50772+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
50773+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50774+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50777+4 4 4 4 4 4
50778+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
50779+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
50780+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
50781+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
50782+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
50783+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
50784+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
50785+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
50786+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50787+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50788+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50791+4 4 4 4 4 4
50792+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50793+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
50794+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50795+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
50796+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
50797+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
50798+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
50799+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
50800+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50801+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50802+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50805+4 4 4 4 4 4
50806+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
50807+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
50808+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
50809+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
50810+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
50811+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
50812+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50813+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
50814+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50815+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50816+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50819+4 4 4 4 4 4
50820+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50821+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
50822+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
50823+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
50824+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
50825+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
50826+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50827+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
50828+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50829+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50830+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50833+4 4 4 4 4 4
50834+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
50835+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
50836+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
50837+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
50838+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
50839+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
50840+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
50841+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
50842+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
50843+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50844+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50847+4 4 4 4 4 4
50848+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50849+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
50850+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
50851+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
50852+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
50853+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
50854+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
50855+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
50856+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
50857+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50858+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50861+4 4 4 4 4 4
50862+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
50863+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
50864+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
50865+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
50866+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
50867+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
50868+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
50869+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
50870+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
50871+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50872+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50875+4 4 4 4 4 4
50876+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50877+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
50878+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
50879+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
50880+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
50881+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
50882+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
50883+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
50884+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
50885+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50886+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50889+4 4 4 4 4 4
50890+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
50891+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
50892+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
50893+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
50894+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
50895+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
50896+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
50897+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
50898+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
50899+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
50900+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50903+4 4 4 4 4 4
50904+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
50905+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
50906+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
50907+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
50908+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
50909+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
50910+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
50911+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
50912+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
50913+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
50914+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50917+4 4 4 4 4 4
50918+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
50919+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
50920+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
50921+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
50922+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
50923+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
50924+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50925+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
50926+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
50927+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
50928+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
50929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50931+4 4 4 4 4 4
50932+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
50933+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
50934+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
50935+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
50936+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
50937+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
50938+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
50939+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
50940+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
50941+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
50942+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50945+4 4 4 4 4 4
50946+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
50947+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
50948+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
50949+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
50950+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
50951+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
50952+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
50953+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
50954+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
50955+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
50956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50959+4 4 4 4 4 4
50960+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
50961+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
50962+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
50963+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
50964+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
50965+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
50966+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
50967+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
50968+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
50969+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
50970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50973+4 4 4 4 4 4
50974+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
50975+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
50976+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
50977+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
50978+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
50979+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
50980+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
50981+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
50982+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
50983+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
50984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50987+4 4 4 4 4 4
50988+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
50989+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
50990+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
50991+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
50992+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
50993+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
50994+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
50995+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
50996+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
50997+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
50998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51001+4 4 4 4 4 4
51002+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
51003+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
51004+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
51005+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
51006+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
51007+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
51008+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
51009+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
51010+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
51011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51015+4 4 4 4 4 4
51016+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
51017+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
51018+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
51019+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
51020+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
51021+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
51022+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
51023+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
51024+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
51025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51029+4 4 4 4 4 4
51030+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
51031+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
51032+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
51033+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
51034+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
51035+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
51036+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
51037+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
51038+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51043+4 4 4 4 4 4
51044+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
51045+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
51046+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
51047+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
51048+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
51049+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
51050+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
51051+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
51052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51057+4 4 4 4 4 4
51058+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
51059+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
51060+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
51061+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
51062+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
51063+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
51064+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
51065+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
51066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51071+4 4 4 4 4 4
51072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51073+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
51074+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
51075+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
51076+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
51077+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
51078+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
51079+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
51080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51085+4 4 4 4 4 4
51086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51087+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
51088+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
51089+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
51090+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
51091+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
51092+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
51093+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
51094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51099+4 4 4 4 4 4
51100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51101+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
51102+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
51103+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
51104+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
51105+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
51106+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
51107+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51113+4 4 4 4 4 4
51114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51116+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
51117+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
51118+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
51119+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
51120+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
51121+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51127+4 4 4 4 4 4
51128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51131+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
51132+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
51133+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
51134+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
51135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51141+4 4 4 4 4 4
51142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51145+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
51146+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
51147+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
51148+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
51149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51155+4 4 4 4 4 4
51156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51159+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
51160+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
51161+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
51162+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
51163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51169+4 4 4 4 4 4
51170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51173+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
51174+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
51175+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
51176+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
51177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51183+4 4 4 4 4 4
51184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51188+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
51189+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
51190+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
51191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51197+4 4 4 4 4 4
51198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51202+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
51203+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
51204+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
51205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51211+4 4 4 4 4 4
51212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51216+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
51217+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
51218+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51225+4 4 4 4 4 4
51226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51230+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
51231+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
51232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51239+4 4 4 4 4 4
51240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51244+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
51245+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
51246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51253+4 4 4 4 4 4
51254diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
51255index fe92eed..106e085 100644
51256--- a/drivers/video/mb862xx/mb862xxfb_accel.c
51257+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
51258@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
51259 struct mb862xxfb_par *par = info->par;
51260
51261 if (info->var.bits_per_pixel == 32) {
51262- info->fbops->fb_fillrect = cfb_fillrect;
51263- info->fbops->fb_copyarea = cfb_copyarea;
51264- info->fbops->fb_imageblit = cfb_imageblit;
51265+ pax_open_kernel();
51266+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
51267+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
51268+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
51269+ pax_close_kernel();
51270 } else {
51271 outreg(disp, GC_L0EM, 3);
51272- info->fbops->fb_fillrect = mb86290fb_fillrect;
51273- info->fbops->fb_copyarea = mb86290fb_copyarea;
51274- info->fbops->fb_imageblit = mb86290fb_imageblit;
51275+ pax_open_kernel();
51276+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
51277+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
51278+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
51279+ pax_close_kernel();
51280 }
51281 outreg(draw, GDC_REG_DRAW_BASE, 0);
51282 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
51283diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
51284index ff22871..b129bed 100644
51285--- a/drivers/video/nvidia/nvidia.c
51286+++ b/drivers/video/nvidia/nvidia.c
51287@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
51288 info->fix.line_length = (info->var.xres_virtual *
51289 info->var.bits_per_pixel) >> 3;
51290 if (info->var.accel_flags) {
51291- info->fbops->fb_imageblit = nvidiafb_imageblit;
51292- info->fbops->fb_fillrect = nvidiafb_fillrect;
51293- info->fbops->fb_copyarea = nvidiafb_copyarea;
51294- info->fbops->fb_sync = nvidiafb_sync;
51295+ pax_open_kernel();
51296+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
51297+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
51298+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
51299+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
51300+ pax_close_kernel();
51301 info->pixmap.scan_align = 4;
51302 info->flags &= ~FBINFO_HWACCEL_DISABLED;
51303 info->flags |= FBINFO_READS_FAST;
51304 NVResetGraphics(info);
51305 } else {
51306- info->fbops->fb_imageblit = cfb_imageblit;
51307- info->fbops->fb_fillrect = cfb_fillrect;
51308- info->fbops->fb_copyarea = cfb_copyarea;
51309- info->fbops->fb_sync = NULL;
51310+ pax_open_kernel();
51311+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
51312+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
51313+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
51314+ *(void **)&info->fbops->fb_sync = NULL;
51315+ pax_close_kernel();
51316 info->pixmap.scan_align = 1;
51317 info->flags |= FBINFO_HWACCEL_DISABLED;
51318 info->flags &= ~FBINFO_READS_FAST;
51319@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
51320 info->pixmap.size = 8 * 1024;
51321 info->pixmap.flags = FB_PIXMAP_SYSTEM;
51322
51323- if (!hwcur)
51324- info->fbops->fb_cursor = NULL;
51325+ if (!hwcur) {
51326+ pax_open_kernel();
51327+ *(void **)&info->fbops->fb_cursor = NULL;
51328+ pax_close_kernel();
51329+ }
51330
51331 info->var.accel_flags = (!noaccel);
51332
51333diff --git a/drivers/video/output.c b/drivers/video/output.c
51334index 0d6f2cd..6285b97 100644
51335--- a/drivers/video/output.c
51336+++ b/drivers/video/output.c
51337@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
51338 new_dev->props = op;
51339 new_dev->dev.class = &video_output_class;
51340 new_dev->dev.parent = dev;
51341- dev_set_name(&new_dev->dev, name);
51342+ dev_set_name(&new_dev->dev, "%s", name);
51343 dev_set_drvdata(&new_dev->dev, devdata);
51344 ret_code = device_register(&new_dev->dev);
51345 if (ret_code) {
51346diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
51347index 05c2dc3..ea1f391 100644
51348--- a/drivers/video/s1d13xxxfb.c
51349+++ b/drivers/video/s1d13xxxfb.c
51350@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
51351
51352 switch(prod_id) {
51353 case S1D13506_PROD_ID: /* activate acceleration */
51354- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
51355- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
51356+ pax_open_kernel();
51357+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
51358+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
51359+ pax_close_kernel();
51360 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
51361 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
51362 break;
51363diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
51364index b2b33fc..f9f4658 100644
51365--- a/drivers/video/smscufx.c
51366+++ b/drivers/video/smscufx.c
51367@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
51368 fb_deferred_io_cleanup(info);
51369 kfree(info->fbdefio);
51370 info->fbdefio = NULL;
51371- info->fbops->fb_mmap = ufx_ops_mmap;
51372+ pax_open_kernel();
51373+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
51374+ pax_close_kernel();
51375 }
51376
51377 pr_debug("released /dev/fb%d user=%d count=%d",
51378diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
51379index ec03e72..f578436 100644
51380--- a/drivers/video/udlfb.c
51381+++ b/drivers/video/udlfb.c
51382@@ -623,11 +623,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
51383 dlfb_urb_completion(urb);
51384
51385 error:
51386- atomic_add(bytes_sent, &dev->bytes_sent);
51387- atomic_add(bytes_identical, &dev->bytes_identical);
51388- atomic_add(width*height*2, &dev->bytes_rendered);
51389+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
51390+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
51391+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
51392 end_cycles = get_cycles();
51393- atomic_add(((unsigned int) ((end_cycles - start_cycles)
51394+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
51395 >> 10)), /* Kcycles */
51396 &dev->cpu_kcycles_used);
51397
51398@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
51399 dlfb_urb_completion(urb);
51400
51401 error:
51402- atomic_add(bytes_sent, &dev->bytes_sent);
51403- atomic_add(bytes_identical, &dev->bytes_identical);
51404- atomic_add(bytes_rendered, &dev->bytes_rendered);
51405+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
51406+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
51407+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
51408 end_cycles = get_cycles();
51409- atomic_add(((unsigned int) ((end_cycles - start_cycles)
51410+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
51411 >> 10)), /* Kcycles */
51412 &dev->cpu_kcycles_used);
51413 }
51414@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
51415 fb_deferred_io_cleanup(info);
51416 kfree(info->fbdefio);
51417 info->fbdefio = NULL;
51418- info->fbops->fb_mmap = dlfb_ops_mmap;
51419+ pax_open_kernel();
51420+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
51421+ pax_close_kernel();
51422 }
51423
51424 pr_warn("released /dev/fb%d user=%d count=%d\n",
51425@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
51426 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51427 struct dlfb_data *dev = fb_info->par;
51428 return snprintf(buf, PAGE_SIZE, "%u\n",
51429- atomic_read(&dev->bytes_rendered));
51430+ atomic_read_unchecked(&dev->bytes_rendered));
51431 }
51432
51433 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
51434@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
51435 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51436 struct dlfb_data *dev = fb_info->par;
51437 return snprintf(buf, PAGE_SIZE, "%u\n",
51438- atomic_read(&dev->bytes_identical));
51439+ atomic_read_unchecked(&dev->bytes_identical));
51440 }
51441
51442 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
51443@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
51444 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51445 struct dlfb_data *dev = fb_info->par;
51446 return snprintf(buf, PAGE_SIZE, "%u\n",
51447- atomic_read(&dev->bytes_sent));
51448+ atomic_read_unchecked(&dev->bytes_sent));
51449 }
51450
51451 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
51452@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
51453 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51454 struct dlfb_data *dev = fb_info->par;
51455 return snprintf(buf, PAGE_SIZE, "%u\n",
51456- atomic_read(&dev->cpu_kcycles_used));
51457+ atomic_read_unchecked(&dev->cpu_kcycles_used));
51458 }
51459
51460 static ssize_t edid_show(
51461@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
51462 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51463 struct dlfb_data *dev = fb_info->par;
51464
51465- atomic_set(&dev->bytes_rendered, 0);
51466- atomic_set(&dev->bytes_identical, 0);
51467- atomic_set(&dev->bytes_sent, 0);
51468- atomic_set(&dev->cpu_kcycles_used, 0);
51469+ atomic_set_unchecked(&dev->bytes_rendered, 0);
51470+ atomic_set_unchecked(&dev->bytes_identical, 0);
51471+ atomic_set_unchecked(&dev->bytes_sent, 0);
51472+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
51473
51474 return count;
51475 }
51476diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
51477index e328a61..1b08ecb 100644
51478--- a/drivers/video/uvesafb.c
51479+++ b/drivers/video/uvesafb.c
51480@@ -19,6 +19,7 @@
51481 #include <linux/io.h>
51482 #include <linux/mutex.h>
51483 #include <linux/slab.h>
51484+#include <linux/moduleloader.h>
51485 #include <video/edid.h>
51486 #include <video/uvesafb.h>
51487 #ifdef CONFIG_X86
51488@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
51489 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
51490 par->pmi_setpal = par->ypan = 0;
51491 } else {
51492+
51493+#ifdef CONFIG_PAX_KERNEXEC
51494+#ifdef CONFIG_MODULES
51495+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
51496+#endif
51497+ if (!par->pmi_code) {
51498+ par->pmi_setpal = par->ypan = 0;
51499+ return 0;
51500+ }
51501+#endif
51502+
51503 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
51504 + task->t.regs.edi);
51505+
51506+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51507+ pax_open_kernel();
51508+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
51509+ pax_close_kernel();
51510+
51511+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
51512+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
51513+#else
51514 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
51515 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
51516+#endif
51517+
51518 printk(KERN_INFO "uvesafb: protected mode interface info at "
51519 "%04x:%04x\n",
51520 (u16)task->t.regs.es, (u16)task->t.regs.edi);
51521@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
51522 par->ypan = ypan;
51523
51524 if (par->pmi_setpal || par->ypan) {
51525+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
51526 if (__supported_pte_mask & _PAGE_NX) {
51527 par->pmi_setpal = par->ypan = 0;
51528 printk(KERN_WARNING "uvesafb: NX protection is actively."
51529 "We have better not to use the PMI.\n");
51530- } else {
51531+ } else
51532+#endif
51533 uvesafb_vbe_getpmi(task, par);
51534- }
51535 }
51536 #else
51537 /* The protected mode interface is not available on non-x86. */
51538@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
51539 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
51540
51541 /* Disable blanking if the user requested so. */
51542- if (!blank)
51543- info->fbops->fb_blank = NULL;
51544+ if (!blank) {
51545+ pax_open_kernel();
51546+ *(void **)&info->fbops->fb_blank = NULL;
51547+ pax_close_kernel();
51548+ }
51549
51550 /*
51551 * Find out how much IO memory is required for the mode with
51552@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
51553 info->flags = FBINFO_FLAG_DEFAULT |
51554 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
51555
51556- if (!par->ypan)
51557- info->fbops->fb_pan_display = NULL;
51558+ if (!par->ypan) {
51559+ pax_open_kernel();
51560+ *(void **)&info->fbops->fb_pan_display = NULL;
51561+ pax_close_kernel();
51562+ }
51563 }
51564
51565 static void uvesafb_init_mtrr(struct fb_info *info)
51566@@ -1836,6 +1866,11 @@ out:
51567 if (par->vbe_modes)
51568 kfree(par->vbe_modes);
51569
51570+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51571+ if (par->pmi_code)
51572+ module_free_exec(NULL, par->pmi_code);
51573+#endif
51574+
51575 framebuffer_release(info);
51576 return err;
51577 }
51578@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
51579 kfree(par->vbe_state_orig);
51580 if (par->vbe_state_saved)
51581 kfree(par->vbe_state_saved);
51582+
51583+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51584+ if (par->pmi_code)
51585+ module_free_exec(NULL, par->pmi_code);
51586+#endif
51587+
51588 }
51589
51590 framebuffer_release(info);
51591diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
51592index 501b340..d80aa17 100644
51593--- a/drivers/video/vesafb.c
51594+++ b/drivers/video/vesafb.c
51595@@ -9,6 +9,7 @@
51596 */
51597
51598 #include <linux/module.h>
51599+#include <linux/moduleloader.h>
51600 #include <linux/kernel.h>
51601 #include <linux/errno.h>
51602 #include <linux/string.h>
51603@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
51604 static int vram_total __initdata; /* Set total amount of memory */
51605 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
51606 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
51607-static void (*pmi_start)(void) __read_mostly;
51608-static void (*pmi_pal) (void) __read_mostly;
51609+static void (*pmi_start)(void) __read_only;
51610+static void (*pmi_pal) (void) __read_only;
51611 static int depth __read_mostly;
51612 static int vga_compat __read_mostly;
51613 /* --------------------------------------------------------------------- */
51614@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
51615 unsigned int size_vmode;
51616 unsigned int size_remap;
51617 unsigned int size_total;
51618+ void *pmi_code = NULL;
51619
51620 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
51621 return -ENODEV;
51622@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
51623 size_remap = size_total;
51624 vesafb_fix.smem_len = size_remap;
51625
51626-#ifndef __i386__
51627- screen_info.vesapm_seg = 0;
51628-#endif
51629-
51630 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
51631 printk(KERN_WARNING
51632 "vesafb: cannot reserve video memory at 0x%lx\n",
51633@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
51634 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
51635 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
51636
51637+#ifdef __i386__
51638+
51639+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51640+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
51641+ if (!pmi_code)
51642+#elif !defined(CONFIG_PAX_KERNEXEC)
51643+ if (0)
51644+#endif
51645+
51646+#endif
51647+ screen_info.vesapm_seg = 0;
51648+
51649 if (screen_info.vesapm_seg) {
51650- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
51651- screen_info.vesapm_seg,screen_info.vesapm_off);
51652+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
51653+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
51654 }
51655
51656 if (screen_info.vesapm_seg < 0xc000)
51657@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
51658
51659 if (ypan || pmi_setpal) {
51660 unsigned short *pmi_base;
51661+
51662 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
51663- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
51664- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
51665+
51666+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51667+ pax_open_kernel();
51668+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
51669+#else
51670+ pmi_code = pmi_base;
51671+#endif
51672+
51673+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
51674+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
51675+
51676+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51677+ pmi_start = ktva_ktla(pmi_start);
51678+ pmi_pal = ktva_ktla(pmi_pal);
51679+ pax_close_kernel();
51680+#endif
51681+
51682 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
51683 if (pmi_base[3]) {
51684 printk(KERN_INFO "vesafb: pmi: ports = ");
51685@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
51686 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
51687 (ypan ? FBINFO_HWACCEL_YPAN : 0);
51688
51689- if (!ypan)
51690- info->fbops->fb_pan_display = NULL;
51691+ if (!ypan) {
51692+ pax_open_kernel();
51693+ *(void **)&info->fbops->fb_pan_display = NULL;
51694+ pax_close_kernel();
51695+ }
51696
51697 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
51698 err = -ENOMEM;
51699@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
51700 info->node, info->fix.id);
51701 return 0;
51702 err:
51703+
51704+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51705+ module_free_exec(NULL, pmi_code);
51706+#endif
51707+
51708 if (info->screen_base)
51709 iounmap(info->screen_base);
51710 framebuffer_release(info);
51711diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
51712index 88714ae..16c2e11 100644
51713--- a/drivers/video/via/via_clock.h
51714+++ b/drivers/video/via/via_clock.h
51715@@ -56,7 +56,7 @@ struct via_clock {
51716
51717 void (*set_engine_pll_state)(u8 state);
51718 void (*set_engine_pll)(struct via_pll_config config);
51719-};
51720+} __no_const;
51721
51722
51723 static inline u32 get_pll_internal_frequency(u32 ref_freq,
51724diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
51725index fef20db..d28b1ab 100644
51726--- a/drivers/xen/xenfs/xenstored.c
51727+++ b/drivers/xen/xenfs/xenstored.c
51728@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
51729 static int xsd_kva_open(struct inode *inode, struct file *file)
51730 {
51731 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
51732+#ifdef CONFIG_GRKERNSEC_HIDESYM
51733+ NULL);
51734+#else
51735 xen_store_interface);
51736+#endif
51737+
51738 if (!file->private_data)
51739 return -ENOMEM;
51740 return 0;
51741diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
51742index 055562c..fdfb10d 100644
51743--- a/fs/9p/vfs_addr.c
51744+++ b/fs/9p/vfs_addr.c
51745@@ -186,7 +186,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
51746
51747 retval = v9fs_file_write_internal(inode,
51748 v9inode->writeback_fid,
51749- (__force const char __user *)buffer,
51750+ (const char __force_user *)buffer,
51751 len, &offset, 0);
51752 if (retval > 0)
51753 retval = 0;
51754diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
51755index d86edc8..40ff2fb 100644
51756--- a/fs/9p/vfs_inode.c
51757+++ b/fs/9p/vfs_inode.c
51758@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
51759 void
51760 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
51761 {
51762- char *s = nd_get_link(nd);
51763+ const char *s = nd_get_link(nd);
51764
51765 p9_debug(P9_DEBUG_VFS, " %s %s\n",
51766 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
51767diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
51768index 370b24c..ff0be7b 100644
51769--- a/fs/Kconfig.binfmt
51770+++ b/fs/Kconfig.binfmt
51771@@ -103,7 +103,7 @@ config HAVE_AOUT
51772
51773 config BINFMT_AOUT
51774 tristate "Kernel support for a.out and ECOFF binaries"
51775- depends on HAVE_AOUT
51776+ depends on HAVE_AOUT && BROKEN
51777 ---help---
51778 A.out (Assembler.OUTput) is a set of formats for libraries and
51779 executables used in the earliest versions of UNIX. Linux used
51780diff --git a/fs/aio.c b/fs/aio.c
51781index 2bbcacf..8614116 100644
51782--- a/fs/aio.c
51783+++ b/fs/aio.c
51784@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
51785 size += sizeof(struct io_event) * nr_events;
51786 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
51787
51788- if (nr_pages < 0)
51789+ if (nr_pages <= 0)
51790 return -EINVAL;
51791
51792 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
51793@@ -950,6 +950,7 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
51794 static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
51795 {
51796 ssize_t ret;
51797+ struct iovec iovstack;
51798
51799 kiocb->ki_nr_segs = kiocb->ki_nbytes;
51800
51801@@ -957,17 +958,22 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
51802 if (compat)
51803 ret = compat_rw_copy_check_uvector(rw,
51804 (struct compat_iovec __user *)kiocb->ki_buf,
51805- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
51806+ kiocb->ki_nr_segs, 1, &iovstack,
51807 &kiocb->ki_iovec);
51808 else
51809 #endif
51810 ret = rw_copy_check_uvector(rw,
51811 (struct iovec __user *)kiocb->ki_buf,
51812- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
51813+ kiocb->ki_nr_segs, 1, &iovstack,
51814 &kiocb->ki_iovec);
51815 if (ret < 0)
51816 return ret;
51817
51818+ if (kiocb->ki_iovec == &iovstack) {
51819+ kiocb->ki_inline_vec = iovstack;
51820+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
51821+ }
51822+
51823 /* ki_nbytes now reflect bytes instead of segs */
51824 kiocb->ki_nbytes = ret;
51825 return 0;
51826diff --git a/fs/attr.c b/fs/attr.c
51827index 1449adb..a2038c2 100644
51828--- a/fs/attr.c
51829+++ b/fs/attr.c
51830@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
51831 unsigned long limit;
51832
51833 limit = rlimit(RLIMIT_FSIZE);
51834+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
51835 if (limit != RLIM_INFINITY && offset > limit)
51836 goto out_sig;
51837 if (offset > inode->i_sb->s_maxbytes)
51838diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
51839index 3db70da..7aeec5b 100644
51840--- a/fs/autofs4/waitq.c
51841+++ b/fs/autofs4/waitq.c
51842@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
51843 {
51844 unsigned long sigpipe, flags;
51845 mm_segment_t fs;
51846- const char *data = (const char *)addr;
51847+ const char __user *data = (const char __force_user *)addr;
51848 ssize_t wr = 0;
51849
51850 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
51851@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
51852 return 1;
51853 }
51854
51855+#ifdef CONFIG_GRKERNSEC_HIDESYM
51856+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
51857+#endif
51858+
51859 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
51860 enum autofs_notify notify)
51861 {
51862@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
51863
51864 /* If this is a direct mount request create a dummy name */
51865 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
51866+#ifdef CONFIG_GRKERNSEC_HIDESYM
51867+ /* this name does get written to userland via autofs4_write() */
51868+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
51869+#else
51870 qstr.len = sprintf(name, "%p", dentry);
51871+#endif
51872 else {
51873 qstr.len = autofs4_getpath(sbi, dentry, &name);
51874 if (!qstr.len) {
51875diff --git a/fs/befs/endian.h b/fs/befs/endian.h
51876index 2722387..c8dd2a7 100644
51877--- a/fs/befs/endian.h
51878+++ b/fs/befs/endian.h
51879@@ -11,7 +11,7 @@
51880
51881 #include <asm/byteorder.h>
51882
51883-static inline u64
51884+static inline u64 __intentional_overflow(-1)
51885 fs64_to_cpu(const struct super_block *sb, fs64 n)
51886 {
51887 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
51888@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
51889 return (__force fs64)cpu_to_be64(n);
51890 }
51891
51892-static inline u32
51893+static inline u32 __intentional_overflow(-1)
51894 fs32_to_cpu(const struct super_block *sb, fs32 n)
51895 {
51896 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
51897diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
51898index f95dddc..b1e2c1c 100644
51899--- a/fs/befs/linuxvfs.c
51900+++ b/fs/befs/linuxvfs.c
51901@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
51902 {
51903 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
51904 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
51905- char *link = nd_get_link(nd);
51906+ const char *link = nd_get_link(nd);
51907 if (!IS_ERR(link))
51908 kfree(link);
51909 }
51910diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
51911index bce8769..7fc7544 100644
51912--- a/fs/binfmt_aout.c
51913+++ b/fs/binfmt_aout.c
51914@@ -16,6 +16,7 @@
51915 #include <linux/string.h>
51916 #include <linux/fs.h>
51917 #include <linux/file.h>
51918+#include <linux/security.h>
51919 #include <linux/stat.h>
51920 #include <linux/fcntl.h>
51921 #include <linux/ptrace.h>
51922@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
51923 #endif
51924 # define START_STACK(u) ((void __user *)u.start_stack)
51925
51926+ memset(&dump, 0, sizeof(dump));
51927+
51928 fs = get_fs();
51929 set_fs(KERNEL_DS);
51930 has_dumped = 1;
51931@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
51932
51933 /* If the size of the dump file exceeds the rlimit, then see what would happen
51934 if we wrote the stack, but not the data area. */
51935+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
51936 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
51937 dump.u_dsize = 0;
51938
51939 /* Make sure we have enough room to write the stack and data areas. */
51940+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
51941 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
51942 dump.u_ssize = 0;
51943
51944@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
51945 rlim = rlimit(RLIMIT_DATA);
51946 if (rlim >= RLIM_INFINITY)
51947 rlim = ~0;
51948+
51949+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
51950 if (ex.a_data + ex.a_bss > rlim)
51951 return -ENOMEM;
51952
51953@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
51954
51955 install_exec_creds(bprm);
51956
51957+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
51958+ current->mm->pax_flags = 0UL;
51959+#endif
51960+
51961+#ifdef CONFIG_PAX_PAGEEXEC
51962+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
51963+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
51964+
51965+#ifdef CONFIG_PAX_EMUTRAMP
51966+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
51967+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
51968+#endif
51969+
51970+#ifdef CONFIG_PAX_MPROTECT
51971+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
51972+ current->mm->pax_flags |= MF_PAX_MPROTECT;
51973+#endif
51974+
51975+ }
51976+#endif
51977+
51978 if (N_MAGIC(ex) == OMAGIC) {
51979 unsigned long text_addr, map_size;
51980 loff_t pos;
51981@@ -324,7 +352,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
51982 }
51983
51984 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
51985- PROT_READ | PROT_WRITE | PROT_EXEC,
51986+ PROT_READ | PROT_WRITE,
51987 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
51988 fd_offset + ex.a_text);
51989 if (error != N_DATADDR(ex)) {
51990diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
51991index f8a0b0e..6f036ed 100644
51992--- a/fs/binfmt_elf.c
51993+++ b/fs/binfmt_elf.c
51994@@ -34,6 +34,7 @@
51995 #include <linux/utsname.h>
51996 #include <linux/coredump.h>
51997 #include <linux/sched.h>
51998+#include <linux/xattr.h>
51999 #include <asm/uaccess.h>
52000 #include <asm/param.h>
52001 #include <asm/page.h>
52002@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
52003 #define elf_core_dump NULL
52004 #endif
52005
52006+#ifdef CONFIG_PAX_MPROTECT
52007+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
52008+#endif
52009+
52010+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52011+static void elf_handle_mmap(struct file *file);
52012+#endif
52013+
52014 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
52015 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
52016 #else
52017@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
52018 .load_binary = load_elf_binary,
52019 .load_shlib = load_elf_library,
52020 .core_dump = elf_core_dump,
52021+
52022+#ifdef CONFIG_PAX_MPROTECT
52023+ .handle_mprotect= elf_handle_mprotect,
52024+#endif
52025+
52026+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52027+ .handle_mmap = elf_handle_mmap,
52028+#endif
52029+
52030 .min_coredump = ELF_EXEC_PAGESIZE,
52031 };
52032
52033@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
52034
52035 static int set_brk(unsigned long start, unsigned long end)
52036 {
52037+ unsigned long e = end;
52038+
52039 start = ELF_PAGEALIGN(start);
52040 end = ELF_PAGEALIGN(end);
52041 if (end > start) {
52042@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
52043 if (BAD_ADDR(addr))
52044 return addr;
52045 }
52046- current->mm->start_brk = current->mm->brk = end;
52047+ current->mm->start_brk = current->mm->brk = e;
52048 return 0;
52049 }
52050
52051@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52052 elf_addr_t __user *u_rand_bytes;
52053 const char *k_platform = ELF_PLATFORM;
52054 const char *k_base_platform = ELF_BASE_PLATFORM;
52055- unsigned char k_rand_bytes[16];
52056+ u32 k_rand_bytes[4];
52057 int items;
52058 elf_addr_t *elf_info;
52059 int ei_index = 0;
52060 const struct cred *cred = current_cred();
52061 struct vm_area_struct *vma;
52062+ unsigned long saved_auxv[AT_VECTOR_SIZE];
52063
52064 /*
52065 * In some cases (e.g. Hyper-Threading), we want to avoid L1
52066@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52067 * Generate 16 random bytes for userspace PRNG seeding.
52068 */
52069 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
52070- u_rand_bytes = (elf_addr_t __user *)
52071- STACK_ALLOC(p, sizeof(k_rand_bytes));
52072+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
52073+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
52074+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
52075+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
52076+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
52077+ u_rand_bytes = (elf_addr_t __user *) p;
52078 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
52079 return -EFAULT;
52080
52081@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52082 return -EFAULT;
52083 current->mm->env_end = p;
52084
52085+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
52086+
52087 /* Put the elf_info on the stack in the right place. */
52088 sp = (elf_addr_t __user *)envp + 1;
52089- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
52090+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
52091 return -EFAULT;
52092 return 0;
52093 }
52094@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
52095 an ELF header */
52096
52097 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52098- struct file *interpreter, unsigned long *interp_map_addr,
52099- unsigned long no_base)
52100+ struct file *interpreter, unsigned long no_base)
52101 {
52102 struct elf_phdr *elf_phdata;
52103 struct elf_phdr *eppnt;
52104- unsigned long load_addr = 0;
52105+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
52106 int load_addr_set = 0;
52107 unsigned long last_bss = 0, elf_bss = 0;
52108- unsigned long error = ~0UL;
52109+ unsigned long error = -EINVAL;
52110 unsigned long total_size;
52111 int retval, i, size;
52112
52113@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52114 goto out_close;
52115 }
52116
52117+#ifdef CONFIG_PAX_SEGMEXEC
52118+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
52119+ pax_task_size = SEGMEXEC_TASK_SIZE;
52120+#endif
52121+
52122 eppnt = elf_phdata;
52123 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
52124 if (eppnt->p_type == PT_LOAD) {
52125@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52126 map_addr = elf_map(interpreter, load_addr + vaddr,
52127 eppnt, elf_prot, elf_type, total_size);
52128 total_size = 0;
52129- if (!*interp_map_addr)
52130- *interp_map_addr = map_addr;
52131 error = map_addr;
52132 if (BAD_ADDR(map_addr))
52133 goto out_close;
52134@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52135 k = load_addr + eppnt->p_vaddr;
52136 if (BAD_ADDR(k) ||
52137 eppnt->p_filesz > eppnt->p_memsz ||
52138- eppnt->p_memsz > TASK_SIZE ||
52139- TASK_SIZE - eppnt->p_memsz < k) {
52140+ eppnt->p_memsz > pax_task_size ||
52141+ pax_task_size - eppnt->p_memsz < k) {
52142 error = -ENOMEM;
52143 goto out_close;
52144 }
52145@@ -538,6 +567,315 @@ out:
52146 return error;
52147 }
52148
52149+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52150+#ifdef CONFIG_PAX_SOFTMODE
52151+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
52152+{
52153+ unsigned long pax_flags = 0UL;
52154+
52155+#ifdef CONFIG_PAX_PAGEEXEC
52156+ if (elf_phdata->p_flags & PF_PAGEEXEC)
52157+ pax_flags |= MF_PAX_PAGEEXEC;
52158+#endif
52159+
52160+#ifdef CONFIG_PAX_SEGMEXEC
52161+ if (elf_phdata->p_flags & PF_SEGMEXEC)
52162+ pax_flags |= MF_PAX_SEGMEXEC;
52163+#endif
52164+
52165+#ifdef CONFIG_PAX_EMUTRAMP
52166+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
52167+ pax_flags |= MF_PAX_EMUTRAMP;
52168+#endif
52169+
52170+#ifdef CONFIG_PAX_MPROTECT
52171+ if (elf_phdata->p_flags & PF_MPROTECT)
52172+ pax_flags |= MF_PAX_MPROTECT;
52173+#endif
52174+
52175+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52176+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
52177+ pax_flags |= MF_PAX_RANDMMAP;
52178+#endif
52179+
52180+ return pax_flags;
52181+}
52182+#endif
52183+
52184+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
52185+{
52186+ unsigned long pax_flags = 0UL;
52187+
52188+#ifdef CONFIG_PAX_PAGEEXEC
52189+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
52190+ pax_flags |= MF_PAX_PAGEEXEC;
52191+#endif
52192+
52193+#ifdef CONFIG_PAX_SEGMEXEC
52194+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
52195+ pax_flags |= MF_PAX_SEGMEXEC;
52196+#endif
52197+
52198+#ifdef CONFIG_PAX_EMUTRAMP
52199+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
52200+ pax_flags |= MF_PAX_EMUTRAMP;
52201+#endif
52202+
52203+#ifdef CONFIG_PAX_MPROTECT
52204+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
52205+ pax_flags |= MF_PAX_MPROTECT;
52206+#endif
52207+
52208+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52209+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
52210+ pax_flags |= MF_PAX_RANDMMAP;
52211+#endif
52212+
52213+ return pax_flags;
52214+}
52215+#endif
52216+
52217+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
52218+#ifdef CONFIG_PAX_SOFTMODE
52219+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
52220+{
52221+ unsigned long pax_flags = 0UL;
52222+
52223+#ifdef CONFIG_PAX_PAGEEXEC
52224+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
52225+ pax_flags |= MF_PAX_PAGEEXEC;
52226+#endif
52227+
52228+#ifdef CONFIG_PAX_SEGMEXEC
52229+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
52230+ pax_flags |= MF_PAX_SEGMEXEC;
52231+#endif
52232+
52233+#ifdef CONFIG_PAX_EMUTRAMP
52234+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
52235+ pax_flags |= MF_PAX_EMUTRAMP;
52236+#endif
52237+
52238+#ifdef CONFIG_PAX_MPROTECT
52239+ if (pax_flags_softmode & MF_PAX_MPROTECT)
52240+ pax_flags |= MF_PAX_MPROTECT;
52241+#endif
52242+
52243+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52244+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
52245+ pax_flags |= MF_PAX_RANDMMAP;
52246+#endif
52247+
52248+ return pax_flags;
52249+}
52250+#endif
52251+
52252+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
52253+{
52254+ unsigned long pax_flags = 0UL;
52255+
52256+#ifdef CONFIG_PAX_PAGEEXEC
52257+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
52258+ pax_flags |= MF_PAX_PAGEEXEC;
52259+#endif
52260+
52261+#ifdef CONFIG_PAX_SEGMEXEC
52262+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
52263+ pax_flags |= MF_PAX_SEGMEXEC;
52264+#endif
52265+
52266+#ifdef CONFIG_PAX_EMUTRAMP
52267+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
52268+ pax_flags |= MF_PAX_EMUTRAMP;
52269+#endif
52270+
52271+#ifdef CONFIG_PAX_MPROTECT
52272+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
52273+ pax_flags |= MF_PAX_MPROTECT;
52274+#endif
52275+
52276+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52277+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
52278+ pax_flags |= MF_PAX_RANDMMAP;
52279+#endif
52280+
52281+ return pax_flags;
52282+}
52283+#endif
52284+
52285+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52286+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
52287+{
52288+ unsigned long pax_flags = 0UL;
52289+
52290+#ifdef CONFIG_PAX_EI_PAX
52291+
52292+#ifdef CONFIG_PAX_PAGEEXEC
52293+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
52294+ pax_flags |= MF_PAX_PAGEEXEC;
52295+#endif
52296+
52297+#ifdef CONFIG_PAX_SEGMEXEC
52298+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
52299+ pax_flags |= MF_PAX_SEGMEXEC;
52300+#endif
52301+
52302+#ifdef CONFIG_PAX_EMUTRAMP
52303+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
52304+ pax_flags |= MF_PAX_EMUTRAMP;
52305+#endif
52306+
52307+#ifdef CONFIG_PAX_MPROTECT
52308+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
52309+ pax_flags |= MF_PAX_MPROTECT;
52310+#endif
52311+
52312+#ifdef CONFIG_PAX_ASLR
52313+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
52314+ pax_flags |= MF_PAX_RANDMMAP;
52315+#endif
52316+
52317+#else
52318+
52319+#ifdef CONFIG_PAX_PAGEEXEC
52320+ pax_flags |= MF_PAX_PAGEEXEC;
52321+#endif
52322+
52323+#ifdef CONFIG_PAX_SEGMEXEC
52324+ pax_flags |= MF_PAX_SEGMEXEC;
52325+#endif
52326+
52327+#ifdef CONFIG_PAX_MPROTECT
52328+ pax_flags |= MF_PAX_MPROTECT;
52329+#endif
52330+
52331+#ifdef CONFIG_PAX_RANDMMAP
52332+ if (randomize_va_space)
52333+ pax_flags |= MF_PAX_RANDMMAP;
52334+#endif
52335+
52336+#endif
52337+
52338+ return pax_flags;
52339+}
52340+
52341+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
52342+{
52343+
52344+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52345+ unsigned long i;
52346+
52347+ for (i = 0UL; i < elf_ex->e_phnum; i++)
52348+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
52349+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
52350+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
52351+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
52352+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
52353+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
52354+ return ~0UL;
52355+
52356+#ifdef CONFIG_PAX_SOFTMODE
52357+ if (pax_softmode)
52358+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
52359+ else
52360+#endif
52361+
52362+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
52363+ break;
52364+ }
52365+#endif
52366+
52367+ return ~0UL;
52368+}
52369+
52370+static unsigned long pax_parse_xattr_pax(struct file * const file)
52371+{
52372+
52373+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
52374+ ssize_t xattr_size, i;
52375+ unsigned char xattr_value[sizeof("pemrs") - 1];
52376+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
52377+
52378+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
52379+ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
52380+ return ~0UL;
52381+
52382+ for (i = 0; i < xattr_size; i++)
52383+ switch (xattr_value[i]) {
52384+ default:
52385+ return ~0UL;
52386+
52387+#define parse_flag(option1, option2, flag) \
52388+ case option1: \
52389+ if (pax_flags_hardmode & MF_PAX_##flag) \
52390+ return ~0UL; \
52391+ pax_flags_hardmode |= MF_PAX_##flag; \
52392+ break; \
52393+ case option2: \
52394+ if (pax_flags_softmode & MF_PAX_##flag) \
52395+ return ~0UL; \
52396+ pax_flags_softmode |= MF_PAX_##flag; \
52397+ break;
52398+
52399+ parse_flag('p', 'P', PAGEEXEC);
52400+ parse_flag('e', 'E', EMUTRAMP);
52401+ parse_flag('m', 'M', MPROTECT);
52402+ parse_flag('r', 'R', RANDMMAP);
52403+ parse_flag('s', 'S', SEGMEXEC);
52404+
52405+#undef parse_flag
52406+ }
52407+
52408+ if (pax_flags_hardmode & pax_flags_softmode)
52409+ return ~0UL;
52410+
52411+#ifdef CONFIG_PAX_SOFTMODE
52412+ if (pax_softmode)
52413+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
52414+ else
52415+#endif
52416+
52417+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
52418+#else
52419+ return ~0UL;
52420+#endif
52421+
52422+}
52423+
52424+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
52425+{
52426+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
52427+
52428+ pax_flags = pax_parse_ei_pax(elf_ex);
52429+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
52430+ xattr_pax_flags = pax_parse_xattr_pax(file);
52431+
52432+ if (pt_pax_flags == ~0UL)
52433+ pt_pax_flags = xattr_pax_flags;
52434+ else if (xattr_pax_flags == ~0UL)
52435+ xattr_pax_flags = pt_pax_flags;
52436+ if (pt_pax_flags != xattr_pax_flags)
52437+ return -EINVAL;
52438+ if (pt_pax_flags != ~0UL)
52439+ pax_flags = pt_pax_flags;
52440+
52441+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
52442+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52443+ if ((__supported_pte_mask & _PAGE_NX))
52444+ pax_flags &= ~MF_PAX_SEGMEXEC;
52445+ else
52446+ pax_flags &= ~MF_PAX_PAGEEXEC;
52447+ }
52448+#endif
52449+
52450+ if (0 > pax_check_flags(&pax_flags))
52451+ return -EINVAL;
52452+
52453+ current->mm->pax_flags = pax_flags;
52454+ return 0;
52455+}
52456+#endif
52457+
52458 /*
52459 * These are the functions used to load ELF style executables and shared
52460 * libraries. There is no binary dependent code anywhere else.
52461@@ -554,6 +892,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
52462 {
52463 unsigned int random_variable = 0;
52464
52465+#ifdef CONFIG_PAX_RANDUSTACK
52466+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
52467+ return stack_top - current->mm->delta_stack;
52468+#endif
52469+
52470 if ((current->flags & PF_RANDOMIZE) &&
52471 !(current->personality & ADDR_NO_RANDOMIZE)) {
52472 random_variable = get_random_int() & STACK_RND_MASK;
52473@@ -572,7 +915,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
52474 unsigned long load_addr = 0, load_bias = 0;
52475 int load_addr_set = 0;
52476 char * elf_interpreter = NULL;
52477- unsigned long error;
52478+ unsigned long error = 0;
52479 struct elf_phdr *elf_ppnt, *elf_phdata;
52480 unsigned long elf_bss, elf_brk;
52481 int retval, i;
52482@@ -582,12 +925,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
52483 unsigned long start_code, end_code, start_data, end_data;
52484 unsigned long reloc_func_desc __maybe_unused = 0;
52485 int executable_stack = EXSTACK_DEFAULT;
52486- unsigned long def_flags = 0;
52487 struct pt_regs *regs = current_pt_regs();
52488 struct {
52489 struct elfhdr elf_ex;
52490 struct elfhdr interp_elf_ex;
52491 } *loc;
52492+ unsigned long pax_task_size = TASK_SIZE;
52493
52494 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
52495 if (!loc) {
52496@@ -723,11 +1066,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
52497 goto out_free_dentry;
52498
52499 /* OK, This is the point of no return */
52500- current->mm->def_flags = def_flags;
52501+
52502+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52503+ current->mm->pax_flags = 0UL;
52504+#endif
52505+
52506+#ifdef CONFIG_PAX_DLRESOLVE
52507+ current->mm->call_dl_resolve = 0UL;
52508+#endif
52509+
52510+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
52511+ current->mm->call_syscall = 0UL;
52512+#endif
52513+
52514+#ifdef CONFIG_PAX_ASLR
52515+ current->mm->delta_mmap = 0UL;
52516+ current->mm->delta_stack = 0UL;
52517+#endif
52518+
52519+ current->mm->def_flags = 0;
52520+
52521+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52522+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
52523+ send_sig(SIGKILL, current, 0);
52524+ goto out_free_dentry;
52525+ }
52526+#endif
52527+
52528+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52529+ pax_set_initial_flags(bprm);
52530+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
52531+ if (pax_set_initial_flags_func)
52532+ (pax_set_initial_flags_func)(bprm);
52533+#endif
52534+
52535+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
52536+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
52537+ current->mm->context.user_cs_limit = PAGE_SIZE;
52538+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
52539+ }
52540+#endif
52541+
52542+#ifdef CONFIG_PAX_SEGMEXEC
52543+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
52544+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
52545+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
52546+ pax_task_size = SEGMEXEC_TASK_SIZE;
52547+ current->mm->def_flags |= VM_NOHUGEPAGE;
52548+ }
52549+#endif
52550+
52551+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
52552+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52553+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
52554+ put_cpu();
52555+ }
52556+#endif
52557
52558 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
52559 may depend on the personality. */
52560 SET_PERSONALITY(loc->elf_ex);
52561+
52562+#ifdef CONFIG_PAX_ASLR
52563+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
52564+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
52565+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
52566+ }
52567+#endif
52568+
52569+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
52570+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52571+ executable_stack = EXSTACK_DISABLE_X;
52572+ current->personality &= ~READ_IMPLIES_EXEC;
52573+ } else
52574+#endif
52575+
52576 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
52577 current->personality |= READ_IMPLIES_EXEC;
52578
52579@@ -819,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
52580 #else
52581 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
52582 #endif
52583+
52584+#ifdef CONFIG_PAX_RANDMMAP
52585+ /* PaX: randomize base address at the default exe base if requested */
52586+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
52587+#ifdef CONFIG_SPARC64
52588+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
52589+#else
52590+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
52591+#endif
52592+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
52593+ elf_flags |= MAP_FIXED;
52594+ }
52595+#endif
52596+
52597 }
52598
52599 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
52600@@ -851,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
52601 * allowed task size. Note that p_filesz must always be
52602 * <= p_memsz so it is only necessary to check p_memsz.
52603 */
52604- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
52605- elf_ppnt->p_memsz > TASK_SIZE ||
52606- TASK_SIZE - elf_ppnt->p_memsz < k) {
52607+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
52608+ elf_ppnt->p_memsz > pax_task_size ||
52609+ pax_task_size - elf_ppnt->p_memsz < k) {
52610 /* set_brk can never work. Avoid overflows. */
52611 send_sig(SIGKILL, current, 0);
52612 retval = -EINVAL;
52613@@ -892,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
52614 goto out_free_dentry;
52615 }
52616 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
52617- send_sig(SIGSEGV, current, 0);
52618- retval = -EFAULT; /* Nobody gets to see this, but.. */
52619- goto out_free_dentry;
52620+ /*
52621+ * This bss-zeroing can fail if the ELF
52622+ * file specifies odd protections. So
52623+ * we don't check the return value
52624+ */
52625 }
52626
52627+#ifdef CONFIG_PAX_RANDMMAP
52628+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
52629+ unsigned long start, size, flags;
52630+ vm_flags_t vm_flags;
52631+
52632+ start = ELF_PAGEALIGN(elf_brk);
52633+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
52634+ flags = MAP_FIXED | MAP_PRIVATE;
52635+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
52636+
52637+ down_write(&current->mm->mmap_sem);
52638+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
52639+ retval = -ENOMEM;
52640+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
52641+// if (current->personality & ADDR_NO_RANDOMIZE)
52642+// vm_flags |= VM_READ | VM_MAYREAD;
52643+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
52644+ retval = IS_ERR_VALUE(start) ? start : 0;
52645+ }
52646+ up_write(&current->mm->mmap_sem);
52647+ if (retval == 0)
52648+ retval = set_brk(start + size, start + size + PAGE_SIZE);
52649+ if (retval < 0) {
52650+ send_sig(SIGKILL, current, 0);
52651+ goto out_free_dentry;
52652+ }
52653+ }
52654+#endif
52655+
52656 if (elf_interpreter) {
52657- unsigned long interp_map_addr = 0;
52658-
52659 elf_entry = load_elf_interp(&loc->interp_elf_ex,
52660 interpreter,
52661- &interp_map_addr,
52662 load_bias);
52663 if (!IS_ERR((void *)elf_entry)) {
52664 /*
52665@@ -1124,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
52666 * Decide what to dump of a segment, part, all or none.
52667 */
52668 static unsigned long vma_dump_size(struct vm_area_struct *vma,
52669- unsigned long mm_flags)
52670+ unsigned long mm_flags, long signr)
52671 {
52672 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
52673
52674@@ -1162,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
52675 if (vma->vm_file == NULL)
52676 return 0;
52677
52678- if (FILTER(MAPPED_PRIVATE))
52679+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
52680 goto whole;
52681
52682 /*
52683@@ -1387,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
52684 {
52685 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
52686 int i = 0;
52687- do
52688+ do {
52689 i += 2;
52690- while (auxv[i - 2] != AT_NULL);
52691+ } while (auxv[i - 2] != AT_NULL);
52692 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
52693 }
52694
52695@@ -1398,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
52696 {
52697 mm_segment_t old_fs = get_fs();
52698 set_fs(KERNEL_DS);
52699- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
52700+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
52701 set_fs(old_fs);
52702 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
52703 }
52704@@ -2019,14 +2474,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
52705 }
52706
52707 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
52708- unsigned long mm_flags)
52709+ struct coredump_params *cprm)
52710 {
52711 struct vm_area_struct *vma;
52712 size_t size = 0;
52713
52714 for (vma = first_vma(current, gate_vma); vma != NULL;
52715 vma = next_vma(vma, gate_vma))
52716- size += vma_dump_size(vma, mm_flags);
52717+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52718 return size;
52719 }
52720
52721@@ -2119,7 +2574,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52722
52723 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
52724
52725- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
52726+ offset += elf_core_vma_data_size(gate_vma, cprm);
52727 offset += elf_core_extra_data_size();
52728 e_shoff = offset;
52729
52730@@ -2133,10 +2588,12 @@ static int elf_core_dump(struct coredump_params *cprm)
52731 offset = dataoff;
52732
52733 size += sizeof(*elf);
52734+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52735 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
52736 goto end_coredump;
52737
52738 size += sizeof(*phdr4note);
52739+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52740 if (size > cprm->limit
52741 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
52742 goto end_coredump;
52743@@ -2150,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52744 phdr.p_offset = offset;
52745 phdr.p_vaddr = vma->vm_start;
52746 phdr.p_paddr = 0;
52747- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
52748+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52749 phdr.p_memsz = vma->vm_end - vma->vm_start;
52750 offset += phdr.p_filesz;
52751 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
52752@@ -2161,6 +2618,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52753 phdr.p_align = ELF_EXEC_PAGESIZE;
52754
52755 size += sizeof(phdr);
52756+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52757 if (size > cprm->limit
52758 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
52759 goto end_coredump;
52760@@ -2185,7 +2643,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52761 unsigned long addr;
52762 unsigned long end;
52763
52764- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
52765+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52766
52767 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
52768 struct page *page;
52769@@ -2194,6 +2652,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52770 page = get_dump_page(addr);
52771 if (page) {
52772 void *kaddr = kmap(page);
52773+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
52774 stop = ((size += PAGE_SIZE) > cprm->limit) ||
52775 !dump_write(cprm->file, kaddr,
52776 PAGE_SIZE);
52777@@ -2211,6 +2670,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52778
52779 if (e_phnum == PN_XNUM) {
52780 size += sizeof(*shdr4extnum);
52781+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52782 if (size > cprm->limit
52783 || !dump_write(cprm->file, shdr4extnum,
52784 sizeof(*shdr4extnum)))
52785@@ -2231,6 +2691,167 @@ out:
52786
52787 #endif /* CONFIG_ELF_CORE */
52788
52789+#ifdef CONFIG_PAX_MPROTECT
52790+/* PaX: non-PIC ELF libraries need relocations on their executable segments
52791+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
52792+ * we'll remove VM_MAYWRITE for good on RELRO segments.
52793+ *
52794+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
52795+ * basis because we want to allow the common case and not the special ones.
52796+ */
52797+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
52798+{
52799+ struct elfhdr elf_h;
52800+ struct elf_phdr elf_p;
52801+ unsigned long i;
52802+ unsigned long oldflags;
52803+ bool is_textrel_rw, is_textrel_rx, is_relro;
52804+
52805+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
52806+ return;
52807+
52808+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
52809+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
52810+
52811+#ifdef CONFIG_PAX_ELFRELOCS
52812+ /* possible TEXTREL */
52813+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
52814+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
52815+#else
52816+ is_textrel_rw = false;
52817+ is_textrel_rx = false;
52818+#endif
52819+
52820+ /* possible RELRO */
52821+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
52822+
52823+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
52824+ return;
52825+
52826+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
52827+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
52828+
52829+#ifdef CONFIG_PAX_ETEXECRELOCS
52830+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
52831+#else
52832+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
52833+#endif
52834+
52835+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
52836+ !elf_check_arch(&elf_h) ||
52837+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
52838+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
52839+ return;
52840+
52841+ for (i = 0UL; i < elf_h.e_phnum; i++) {
52842+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
52843+ return;
52844+ switch (elf_p.p_type) {
52845+ case PT_DYNAMIC:
52846+ if (!is_textrel_rw && !is_textrel_rx)
52847+ continue;
52848+ i = 0UL;
52849+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
52850+ elf_dyn dyn;
52851+
52852+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
52853+ break;
52854+ if (dyn.d_tag == DT_NULL)
52855+ break;
52856+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
52857+ gr_log_textrel(vma);
52858+ if (is_textrel_rw)
52859+ vma->vm_flags |= VM_MAYWRITE;
52860+ else
52861+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
52862+ vma->vm_flags &= ~VM_MAYWRITE;
52863+ break;
52864+ }
52865+ i++;
52866+ }
52867+ is_textrel_rw = false;
52868+ is_textrel_rx = false;
52869+ continue;
52870+
52871+ case PT_GNU_RELRO:
52872+ if (!is_relro)
52873+ continue;
52874+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
52875+ vma->vm_flags &= ~VM_MAYWRITE;
52876+ is_relro = false;
52877+ continue;
52878+
52879+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52880+ case PT_PAX_FLAGS: {
52881+ const char *msg_mprotect = "", *msg_emutramp = "";
52882+ char *buffer_lib, *buffer_exe;
52883+
52884+ if (elf_p.p_flags & PF_NOMPROTECT)
52885+ msg_mprotect = "MPROTECT disabled";
52886+
52887+#ifdef CONFIG_PAX_EMUTRAMP
52888+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
52889+ msg_emutramp = "EMUTRAMP enabled";
52890+#endif
52891+
52892+ if (!msg_mprotect[0] && !msg_emutramp[0])
52893+ continue;
52894+
52895+ if (!printk_ratelimit())
52896+ continue;
52897+
52898+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
52899+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
52900+ if (buffer_lib && buffer_exe) {
52901+ char *path_lib, *path_exe;
52902+
52903+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
52904+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
52905+
52906+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
52907+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
52908+
52909+ }
52910+ free_page((unsigned long)buffer_exe);
52911+ free_page((unsigned long)buffer_lib);
52912+ continue;
52913+ }
52914+#endif
52915+
52916+ }
52917+ }
52918+}
52919+#endif
52920+
52921+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52922+
52923+extern int grsec_enable_log_rwxmaps;
52924+
52925+static void elf_handle_mmap(struct file *file)
52926+{
52927+ struct elfhdr elf_h;
52928+ struct elf_phdr elf_p;
52929+ unsigned long i;
52930+
52931+ if (!grsec_enable_log_rwxmaps)
52932+ return;
52933+
52934+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
52935+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
52936+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
52937+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
52938+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
52939+ return;
52940+
52941+ for (i = 0UL; i < elf_h.e_phnum; i++) {
52942+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
52943+ return;
52944+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
52945+ gr_log_ptgnustack(file);
52946+ }
52947+}
52948+#endif
52949+
52950 static int __init init_elf_binfmt(void)
52951 {
52952 register_binfmt(&elf_format);
52953diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
52954index d50bbe5..af3b649 100644
52955--- a/fs/binfmt_flat.c
52956+++ b/fs/binfmt_flat.c
52957@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
52958 realdatastart = (unsigned long) -ENOMEM;
52959 printk("Unable to allocate RAM for process data, errno %d\n",
52960 (int)-realdatastart);
52961+ down_write(&current->mm->mmap_sem);
52962 vm_munmap(textpos, text_len);
52963+ up_write(&current->mm->mmap_sem);
52964 ret = realdatastart;
52965 goto err;
52966 }
52967@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
52968 }
52969 if (IS_ERR_VALUE(result)) {
52970 printk("Unable to read data+bss, errno %d\n", (int)-result);
52971+ down_write(&current->mm->mmap_sem);
52972 vm_munmap(textpos, text_len);
52973 vm_munmap(realdatastart, len);
52974+ up_write(&current->mm->mmap_sem);
52975 ret = result;
52976 goto err;
52977 }
52978@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
52979 }
52980 if (IS_ERR_VALUE(result)) {
52981 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
52982+ down_write(&current->mm->mmap_sem);
52983 vm_munmap(textpos, text_len + data_len + extra +
52984 MAX_SHARED_LIBS * sizeof(unsigned long));
52985+ up_write(&current->mm->mmap_sem);
52986 ret = result;
52987 goto err;
52988 }
52989diff --git a/fs/bio.c b/fs/bio.c
e2b79cd1 52990index c5eae72..599e3cf 100644
bb5f0bf8
AF
52991--- a/fs/bio.c
52992+++ b/fs/bio.c
e2b79cd1 52993@@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
bb5f0bf8
AF
52994 /*
52995 * Overflow, abort
52996 */
52997- if (end < start)
52998+ if (end < start || end - start > INT_MAX - nr_pages)
52999 return ERR_PTR(-EINVAL);
53000
53001 nr_pages += end - start;
e2b79cd1 53002@@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
bb5f0bf8
AF
53003 /*
53004 * Overflow, abort
53005 */
53006- if (end < start)
53007+ if (end < start || end - start > INT_MAX - nr_pages)
53008 return ERR_PTR(-EINVAL);
53009
53010 nr_pages += end - start;
e2b79cd1 53011@@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
bb5f0bf8
AF
53012 const int read = bio_data_dir(bio) == READ;
53013 struct bio_map_data *bmd = bio->bi_private;
53014 int i;
53015- char *p = bmd->sgvecs[0].iov_base;
53016+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
53017
53018 bio_for_each_segment_all(bvec, bio, i) {
53019 char *addr = page_address(bvec->bv_page);
53020diff --git a/fs/block_dev.c b/fs/block_dev.c
53021index 85f5c85..d6f0b1a 100644
53022--- a/fs/block_dev.c
53023+++ b/fs/block_dev.c
53024@@ -658,7 +658,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
53025 else if (bdev->bd_contains == bdev)
53026 return true; /* is a whole device which isn't held */
53027
53028- else if (whole->bd_holder == bd_may_claim)
53029+ else if (whole->bd_holder == (void *)bd_may_claim)
53030 return true; /* is a partition of a device that is being partitioned */
53031 else if (whole->bd_holder != NULL)
53032 return false; /* is a partition of a held device */
53033diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
53034index 7fb054b..ad36c67 100644
53035--- a/fs/btrfs/ctree.c
53036+++ b/fs/btrfs/ctree.c
53037@@ -1076,9 +1076,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
53038 free_extent_buffer(buf);
53039 add_root_to_dirty_list(root);
53040 } else {
53041- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
53042- parent_start = parent->start;
53043- else
53044+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
53045+ if (parent)
53046+ parent_start = parent->start;
53047+ else
53048+ parent_start = 0;
53049+ } else
53050 parent_start = 0;
53051
53052 WARN_ON(trans->transid != btrfs_header_generation(parent));
53053diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
53054index 0f81d67..0ad55fe 100644
53055--- a/fs/btrfs/ioctl.c
53056+++ b/fs/btrfs/ioctl.c
53057@@ -3084,9 +3084,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
53058 for (i = 0; i < num_types; i++) {
53059 struct btrfs_space_info *tmp;
53060
53061+ /* Don't copy in more than we allocated */
53062 if (!slot_count)
53063 break;
53064
53065+ slot_count--;
53066+
53067 info = NULL;
53068 rcu_read_lock();
53069 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
53070@@ -3108,10 +3111,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
53071 memcpy(dest, &space, sizeof(space));
53072 dest++;
53073 space_args.total_spaces++;
53074- slot_count--;
53075 }
53076- if (!slot_count)
53077- break;
53078 }
53079 up_read(&info->groups_sem);
53080 }
53081diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
53082index f0857e0..e7023c5 100644
53083--- a/fs/btrfs/super.c
53084+++ b/fs/btrfs/super.c
53085@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
53086 function, line, errstr);
53087 return;
53088 }
53089- ACCESS_ONCE(trans->transaction->aborted) = errno;
53090+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
53091 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
53092 }
53093 /*
53094diff --git a/fs/buffer.c b/fs/buffer.c
53095index d2a4d1b..df798ca 100644
53096--- a/fs/buffer.c
53097+++ b/fs/buffer.c
53098@@ -3367,7 +3367,7 @@ void __init buffer_init(void)
53099 bh_cachep = kmem_cache_create("buffer_head",
53100 sizeof(struct buffer_head), 0,
53101 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
53102- SLAB_MEM_SPREAD),
53103+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
53104 NULL);
53105
53106 /*
53107diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
53108index 622f469..e8d2d55 100644
53109--- a/fs/cachefiles/bind.c
53110+++ b/fs/cachefiles/bind.c
53111@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
53112 args);
53113
53114 /* start by checking things over */
53115- ASSERT(cache->fstop_percent >= 0 &&
53116- cache->fstop_percent < cache->fcull_percent &&
53117+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
53118 cache->fcull_percent < cache->frun_percent &&
53119 cache->frun_percent < 100);
53120
53121- ASSERT(cache->bstop_percent >= 0 &&
53122- cache->bstop_percent < cache->bcull_percent &&
53123+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
53124 cache->bcull_percent < cache->brun_percent &&
53125 cache->brun_percent < 100);
53126
53127diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
53128index 0a1467b..6a53245 100644
53129--- a/fs/cachefiles/daemon.c
53130+++ b/fs/cachefiles/daemon.c
53131@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
53132 if (n > buflen)
53133 return -EMSGSIZE;
53134
53135- if (copy_to_user(_buffer, buffer, n) != 0)
53136+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
53137 return -EFAULT;
53138
53139 return n;
53140@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
53141 if (test_bit(CACHEFILES_DEAD, &cache->flags))
53142 return -EIO;
53143
53144- if (datalen < 0 || datalen > PAGE_SIZE - 1)
53145+ if (datalen > PAGE_SIZE - 1)
53146 return -EOPNOTSUPP;
53147
53148 /* drag the command string into the kernel so we can parse it */
53149@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
53150 if (args[0] != '%' || args[1] != '\0')
53151 return -EINVAL;
53152
53153- if (fstop < 0 || fstop >= cache->fcull_percent)
53154+ if (fstop >= cache->fcull_percent)
53155 return cachefiles_daemon_range_error(cache, args);
53156
53157 cache->fstop_percent = fstop;
53158@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
53159 if (args[0] != '%' || args[1] != '\0')
53160 return -EINVAL;
53161
53162- if (bstop < 0 || bstop >= cache->bcull_percent)
53163+ if (bstop >= cache->bcull_percent)
53164 return cachefiles_daemon_range_error(cache, args);
53165
53166 cache->bstop_percent = bstop;
53167diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
53168index 4938251..7e01445 100644
53169--- a/fs/cachefiles/internal.h
53170+++ b/fs/cachefiles/internal.h
53171@@ -59,7 +59,7 @@ struct cachefiles_cache {
53172 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
53173 struct rb_root active_nodes; /* active nodes (can't be culled) */
53174 rwlock_t active_lock; /* lock for active_nodes */
53175- atomic_t gravecounter; /* graveyard uniquifier */
53176+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
53177 unsigned frun_percent; /* when to stop culling (% files) */
53178 unsigned fcull_percent; /* when to start culling (% files) */
53179 unsigned fstop_percent; /* when to stop allocating (% files) */
53180@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
53181 * proc.c
53182 */
53183 #ifdef CONFIG_CACHEFILES_HISTOGRAM
53184-extern atomic_t cachefiles_lookup_histogram[HZ];
53185-extern atomic_t cachefiles_mkdir_histogram[HZ];
53186-extern atomic_t cachefiles_create_histogram[HZ];
53187+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
53188+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
53189+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
53190
53191 extern int __init cachefiles_proc_init(void);
53192 extern void cachefiles_proc_cleanup(void);
53193 static inline
53194-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
53195+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
53196 {
53197 unsigned long jif = jiffies - start_jif;
53198 if (jif >= HZ)
53199 jif = HZ - 1;
53200- atomic_inc(&histogram[jif]);
53201+ atomic_inc_unchecked(&histogram[jif]);
53202 }
53203
53204 #else
53205diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
53206index 8c01c5fc..15f982e 100644
53207--- a/fs/cachefiles/namei.c
53208+++ b/fs/cachefiles/namei.c
53209@@ -317,7 +317,7 @@ try_again:
53210 /* first step is to make up a grave dentry in the graveyard */
53211 sprintf(nbuffer, "%08x%08x",
53212 (uint32_t) get_seconds(),
53213- (uint32_t) atomic_inc_return(&cache->gravecounter));
53214+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
53215
53216 /* do the multiway lock magic */
53217 trap = lock_rename(cache->graveyard, dir);
53218diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
53219index eccd339..4c1d995 100644
53220--- a/fs/cachefiles/proc.c
53221+++ b/fs/cachefiles/proc.c
53222@@ -14,9 +14,9 @@
53223 #include <linux/seq_file.h>
53224 #include "internal.h"
53225
53226-atomic_t cachefiles_lookup_histogram[HZ];
53227-atomic_t cachefiles_mkdir_histogram[HZ];
53228-atomic_t cachefiles_create_histogram[HZ];
53229+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
53230+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
53231+atomic_unchecked_t cachefiles_create_histogram[HZ];
53232
53233 /*
53234 * display the latency histogram
53235@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
53236 return 0;
53237 default:
53238 index = (unsigned long) v - 3;
53239- x = atomic_read(&cachefiles_lookup_histogram[index]);
53240- y = atomic_read(&cachefiles_mkdir_histogram[index]);
53241- z = atomic_read(&cachefiles_create_histogram[index]);
53242+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
53243+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
53244+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
53245 if (x == 0 && y == 0 && z == 0)
53246 return 0;
53247
53248diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
53249index 317f9ee..3d24511 100644
53250--- a/fs/cachefiles/rdwr.c
53251+++ b/fs/cachefiles/rdwr.c
53252@@ -966,7 +966,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
53253 old_fs = get_fs();
53254 set_fs(KERNEL_DS);
53255 ret = file->f_op->write(
53256- file, (const void __user *) data, len, &pos);
53257+ file, (const void __force_user *) data, len, &pos);
53258 set_fs(old_fs);
53259 kunmap(page);
53260 file_end_write(file);
53261diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
53262index f02d82b..2632cf86 100644
53263--- a/fs/ceph/dir.c
53264+++ b/fs/ceph/dir.c
53265@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
53266 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
53267 struct ceph_mds_client *mdsc = fsc->mdsc;
53268 unsigned frag = fpos_frag(filp->f_pos);
53269- int off = fpos_off(filp->f_pos);
53270+ unsigned int off = fpos_off(filp->f_pos);
53271 int err;
53272 u32 ftype;
53273 struct ceph_mds_reply_info_parsed *rinfo;
53274diff --git a/fs/ceph/super.c b/fs/ceph/super.c
53275index 7d377c9..3fb6559 100644
53276--- a/fs/ceph/super.c
53277+++ b/fs/ceph/super.c
53278@@ -839,7 +839,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
53279 /*
53280 * construct our own bdi so we can control readahead, etc.
53281 */
53282-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
53283+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
53284
53285 static int ceph_register_bdi(struct super_block *sb,
53286 struct ceph_fs_client *fsc)
53287@@ -856,7 +856,7 @@ static int ceph_register_bdi(struct super_block *sb,
53288 default_backing_dev_info.ra_pages;
53289
53290 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
53291- atomic_long_inc_return(&bdi_seq));
53292+ atomic_long_inc_return_unchecked(&bdi_seq));
53293 if (!err)
53294 sb->s_bdi = &fsc->backing_dev_info;
53295 return err;
53296diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
53297index d597483..747901b 100644
53298--- a/fs/cifs/cifs_debug.c
53299+++ b/fs/cifs/cifs_debug.c
53300@@ -284,8 +284,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
53301
53302 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
53303 #ifdef CONFIG_CIFS_STATS2
53304- atomic_set(&totBufAllocCount, 0);
53305- atomic_set(&totSmBufAllocCount, 0);
53306+ atomic_set_unchecked(&totBufAllocCount, 0);
53307+ atomic_set_unchecked(&totSmBufAllocCount, 0);
53308 #endif /* CONFIG_CIFS_STATS2 */
53309 spin_lock(&cifs_tcp_ses_lock);
53310 list_for_each(tmp1, &cifs_tcp_ses_list) {
53311@@ -298,7 +298,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
53312 tcon = list_entry(tmp3,
53313 struct cifs_tcon,
53314 tcon_list);
53315- atomic_set(&tcon->num_smbs_sent, 0);
53316+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
53317 if (server->ops->clear_stats)
53318 server->ops->clear_stats(tcon);
53319 }
53320@@ -330,8 +330,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
53321 smBufAllocCount.counter, cifs_min_small);
53322 #ifdef CONFIG_CIFS_STATS2
53323 seq_printf(m, "Total Large %d Small %d Allocations\n",
53324- atomic_read(&totBufAllocCount),
53325- atomic_read(&totSmBufAllocCount));
53326+ atomic_read_unchecked(&totBufAllocCount),
53327+ atomic_read_unchecked(&totSmBufAllocCount));
53328 #endif /* CONFIG_CIFS_STATS2 */
53329
53330 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
53331@@ -360,7 +360,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
53332 if (tcon->need_reconnect)
53333 seq_puts(m, "\tDISCONNECTED ");
53334 seq_printf(m, "\nSMBs: %d",
53335- atomic_read(&tcon->num_smbs_sent));
53336+ atomic_read_unchecked(&tcon->num_smbs_sent));
53337 if (server->ops->print_stats)
53338 server->ops->print_stats(m, tcon);
53339 }
53340diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
53341index 3752b9f..8db5569 100644
53342--- a/fs/cifs/cifsfs.c
53343+++ b/fs/cifs/cifsfs.c
53344@@ -1035,7 +1035,7 @@ cifs_init_request_bufs(void)
53345 */
53346 cifs_req_cachep = kmem_cache_create("cifs_request",
53347 CIFSMaxBufSize + max_hdr_size, 0,
53348- SLAB_HWCACHE_ALIGN, NULL);
53349+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
53350 if (cifs_req_cachep == NULL)
53351 return -ENOMEM;
53352
53353@@ -1062,7 +1062,7 @@ cifs_init_request_bufs(void)
53354 efficient to alloc 1 per page off the slab compared to 17K (5page)
53355 alloc of large cifs buffers even when page debugging is on */
53356 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
53357- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
53358+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
53359 NULL);
53360 if (cifs_sm_req_cachep == NULL) {
53361 mempool_destroy(cifs_req_poolp);
53362@@ -1147,8 +1147,8 @@ init_cifs(void)
53363 atomic_set(&bufAllocCount, 0);
53364 atomic_set(&smBufAllocCount, 0);
53365 #ifdef CONFIG_CIFS_STATS2
53366- atomic_set(&totBufAllocCount, 0);
53367- atomic_set(&totSmBufAllocCount, 0);
53368+ atomic_set_unchecked(&totBufAllocCount, 0);
53369+ atomic_set_unchecked(&totSmBufAllocCount, 0);
53370 #endif /* CONFIG_CIFS_STATS2 */
53371
53372 atomic_set(&midCount, 0);
53373diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
53374index ea3a0b3..0194e39 100644
53375--- a/fs/cifs/cifsglob.h
53376+++ b/fs/cifs/cifsglob.h
53377@@ -752,35 +752,35 @@ struct cifs_tcon {
53378 __u16 Flags; /* optional support bits */
53379 enum statusEnum tidStatus;
53380 #ifdef CONFIG_CIFS_STATS
53381- atomic_t num_smbs_sent;
53382+ atomic_unchecked_t num_smbs_sent;
53383 union {
53384 struct {
53385- atomic_t num_writes;
53386- atomic_t num_reads;
53387- atomic_t num_flushes;
53388- atomic_t num_oplock_brks;
53389- atomic_t num_opens;
53390- atomic_t num_closes;
53391- atomic_t num_deletes;
53392- atomic_t num_mkdirs;
53393- atomic_t num_posixopens;
53394- atomic_t num_posixmkdirs;
53395- atomic_t num_rmdirs;
53396- atomic_t num_renames;
53397- atomic_t num_t2renames;
53398- atomic_t num_ffirst;
53399- atomic_t num_fnext;
53400- atomic_t num_fclose;
53401- atomic_t num_hardlinks;
53402- atomic_t num_symlinks;
53403- atomic_t num_locks;
53404- atomic_t num_acl_get;
53405- atomic_t num_acl_set;
53406+ atomic_unchecked_t num_writes;
53407+ atomic_unchecked_t num_reads;
53408+ atomic_unchecked_t num_flushes;
53409+ atomic_unchecked_t num_oplock_brks;
53410+ atomic_unchecked_t num_opens;
53411+ atomic_unchecked_t num_closes;
53412+ atomic_unchecked_t num_deletes;
53413+ atomic_unchecked_t num_mkdirs;
53414+ atomic_unchecked_t num_posixopens;
53415+ atomic_unchecked_t num_posixmkdirs;
53416+ atomic_unchecked_t num_rmdirs;
53417+ atomic_unchecked_t num_renames;
53418+ atomic_unchecked_t num_t2renames;
53419+ atomic_unchecked_t num_ffirst;
53420+ atomic_unchecked_t num_fnext;
53421+ atomic_unchecked_t num_fclose;
53422+ atomic_unchecked_t num_hardlinks;
53423+ atomic_unchecked_t num_symlinks;
53424+ atomic_unchecked_t num_locks;
53425+ atomic_unchecked_t num_acl_get;
53426+ atomic_unchecked_t num_acl_set;
53427 } cifs_stats;
53428 #ifdef CONFIG_CIFS_SMB2
53429 struct {
53430- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
53431- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
53432+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
53433+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
53434 } smb2_stats;
53435 #endif /* CONFIG_CIFS_SMB2 */
53436 } stats;
53437@@ -1081,7 +1081,7 @@ convert_delimiter(char *path, char delim)
53438 }
53439
53440 #ifdef CONFIG_CIFS_STATS
53441-#define cifs_stats_inc atomic_inc
53442+#define cifs_stats_inc atomic_inc_unchecked
53443
53444 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
53445 unsigned int bytes)
53446@@ -1446,8 +1446,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
53447 /* Various Debug counters */
53448 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
53449 #ifdef CONFIG_CIFS_STATS2
53450-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
53451-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
53452+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
53453+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
53454 #endif
53455 GLOBAL_EXTERN atomic_t smBufAllocCount;
53456 GLOBAL_EXTERN atomic_t midCount;
53457diff --git a/fs/cifs/link.c b/fs/cifs/link.c
53458index b83c3f5..6437caa 100644
53459--- a/fs/cifs/link.c
53460+++ b/fs/cifs/link.c
53461@@ -616,7 +616,7 @@ symlink_exit:
53462
53463 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
53464 {
53465- char *p = nd_get_link(nd);
53466+ const char *p = nd_get_link(nd);
53467 if (!IS_ERR(p))
53468 kfree(p);
53469 }
53470diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
53471index 1bec014..f329411 100644
53472--- a/fs/cifs/misc.c
53473+++ b/fs/cifs/misc.c
53474@@ -169,7 +169,7 @@ cifs_buf_get(void)
53475 memset(ret_buf, 0, buf_size + 3);
53476 atomic_inc(&bufAllocCount);
53477 #ifdef CONFIG_CIFS_STATS2
53478- atomic_inc(&totBufAllocCount);
53479+ atomic_inc_unchecked(&totBufAllocCount);
53480 #endif /* CONFIG_CIFS_STATS2 */
53481 }
53482
53483@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
53484 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
53485 atomic_inc(&smBufAllocCount);
53486 #ifdef CONFIG_CIFS_STATS2
53487- atomic_inc(&totSmBufAllocCount);
53488+ atomic_inc_unchecked(&totSmBufAllocCount);
53489 #endif /* CONFIG_CIFS_STATS2 */
53490
53491 }
53492diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
53493index 3efdb9d..e845a5e 100644
53494--- a/fs/cifs/smb1ops.c
53495+++ b/fs/cifs/smb1ops.c
53496@@ -591,27 +591,27 @@ static void
53497 cifs_clear_stats(struct cifs_tcon *tcon)
53498 {
53499 #ifdef CONFIG_CIFS_STATS
53500- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
53501- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
53502- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
53503- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
53504- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
53505- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
53506- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
53507- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
53508- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
53509- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
53510- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
53511- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
53512- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
53513- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
53514- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
53515- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
53516- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
53517- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
53518- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
53519- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
53520- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
53521+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
53522+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
53523+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
53524+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
53525+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
53526+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
53527+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
53528+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
53529+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
53530+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
53531+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
53532+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
53533+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
53534+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
53535+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
53536+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
53537+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
53538+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
53539+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
53540+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
53541+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
53542 #endif
53543 }
53544
53545@@ -620,36 +620,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
53546 {
53547 #ifdef CONFIG_CIFS_STATS
53548 seq_printf(m, " Oplocks breaks: %d",
53549- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
53550+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
53551 seq_printf(m, "\nReads: %d Bytes: %llu",
53552- atomic_read(&tcon->stats.cifs_stats.num_reads),
53553+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
53554 (long long)(tcon->bytes_read));
53555 seq_printf(m, "\nWrites: %d Bytes: %llu",
53556- atomic_read(&tcon->stats.cifs_stats.num_writes),
53557+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
53558 (long long)(tcon->bytes_written));
53559 seq_printf(m, "\nFlushes: %d",
53560- atomic_read(&tcon->stats.cifs_stats.num_flushes));
53561+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
53562 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
53563- atomic_read(&tcon->stats.cifs_stats.num_locks),
53564- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
53565- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
53566+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
53567+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
53568+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
53569 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
53570- atomic_read(&tcon->stats.cifs_stats.num_opens),
53571- atomic_read(&tcon->stats.cifs_stats.num_closes),
53572- atomic_read(&tcon->stats.cifs_stats.num_deletes));
53573+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
53574+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
53575+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
53576 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
53577- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
53578- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
53579+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
53580+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
53581 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
53582- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
53583- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
53584+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
53585+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
53586 seq_printf(m, "\nRenames: %d T2 Renames %d",
53587- atomic_read(&tcon->stats.cifs_stats.num_renames),
53588- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
53589+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
53590+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
53591 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
53592- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
53593- atomic_read(&tcon->stats.cifs_stats.num_fnext),
53594- atomic_read(&tcon->stats.cifs_stats.num_fclose));
53595+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
53596+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
53597+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
53598 #endif
53599 }
53600
53601diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
53602index f2e76f3..c44fac7 100644
53603--- a/fs/cifs/smb2ops.c
53604+++ b/fs/cifs/smb2ops.c
53605@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
53606 #ifdef CONFIG_CIFS_STATS
53607 int i;
53608 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
53609- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
53610- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
53611+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
53612+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
53613 }
53614 #endif
53615 }
53616@@ -284,66 +284,66 @@ static void
53617 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
53618 {
53619 #ifdef CONFIG_CIFS_STATS
53620- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
53621- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
53622+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
53623+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
53624 seq_printf(m, "\nNegotiates: %d sent %d failed",
53625- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
53626- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
53627+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
53628+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
53629 seq_printf(m, "\nSessionSetups: %d sent %d failed",
53630- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
53631- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
53632+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
53633+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
53634 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
53635 seq_printf(m, "\nLogoffs: %d sent %d failed",
53636- atomic_read(&sent[SMB2_LOGOFF_HE]),
53637- atomic_read(&failed[SMB2_LOGOFF_HE]));
53638+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
53639+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
53640 seq_printf(m, "\nTreeConnects: %d sent %d failed",
53641- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
53642- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
53643+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
53644+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
53645 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
53646- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
53647- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
53648+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
53649+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
53650 seq_printf(m, "\nCreates: %d sent %d failed",
53651- atomic_read(&sent[SMB2_CREATE_HE]),
53652- atomic_read(&failed[SMB2_CREATE_HE]));
53653+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
53654+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
53655 seq_printf(m, "\nCloses: %d sent %d failed",
53656- atomic_read(&sent[SMB2_CLOSE_HE]),
53657- atomic_read(&failed[SMB2_CLOSE_HE]));
53658+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
53659+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
53660 seq_printf(m, "\nFlushes: %d sent %d failed",
53661- atomic_read(&sent[SMB2_FLUSH_HE]),
53662- atomic_read(&failed[SMB2_FLUSH_HE]));
53663+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
53664+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
53665 seq_printf(m, "\nReads: %d sent %d failed",
53666- atomic_read(&sent[SMB2_READ_HE]),
53667- atomic_read(&failed[SMB2_READ_HE]));
53668+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
53669+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
53670 seq_printf(m, "\nWrites: %d sent %d failed",
53671- atomic_read(&sent[SMB2_WRITE_HE]),
53672- atomic_read(&failed[SMB2_WRITE_HE]));
53673+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
53674+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
53675 seq_printf(m, "\nLocks: %d sent %d failed",
53676- atomic_read(&sent[SMB2_LOCK_HE]),
53677- atomic_read(&failed[SMB2_LOCK_HE]));
53678+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
53679+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
53680 seq_printf(m, "\nIOCTLs: %d sent %d failed",
53681- atomic_read(&sent[SMB2_IOCTL_HE]),
53682- atomic_read(&failed[SMB2_IOCTL_HE]));
53683+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
53684+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
53685 seq_printf(m, "\nCancels: %d sent %d failed",
53686- atomic_read(&sent[SMB2_CANCEL_HE]),
53687- atomic_read(&failed[SMB2_CANCEL_HE]));
53688+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
53689+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
53690 seq_printf(m, "\nEchos: %d sent %d failed",
53691- atomic_read(&sent[SMB2_ECHO_HE]),
53692- atomic_read(&failed[SMB2_ECHO_HE]));
53693+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
53694+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
53695 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
53696- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
53697- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
53698+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
53699+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
53700 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
53701- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
53702- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
53703+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
53704+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
53705 seq_printf(m, "\nQueryInfos: %d sent %d failed",
53706- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
53707- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
53708+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
53709+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
53710 seq_printf(m, "\nSetInfos: %d sent %d failed",
53711- atomic_read(&sent[SMB2_SET_INFO_HE]),
53712- atomic_read(&failed[SMB2_SET_INFO_HE]));
53713+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
53714+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
53715 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
53716- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
53717- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
53718+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
53719+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
53720 #endif
53721 }
53722
53723diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
53724index 2b95ce2..d079d75 100644
53725--- a/fs/cifs/smb2pdu.c
53726+++ b/fs/cifs/smb2pdu.c
53727@@ -1760,8 +1760,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
53728 default:
53729 cifs_dbg(VFS, "info level %u isn't supported\n",
53730 srch_inf->info_level);
53731- rc = -EINVAL;
53732- goto qdir_exit;
53733+ return -EINVAL;
53734 }
53735
53736 req->FileIndex = cpu_to_le32(index);
53737diff --git a/fs/coda/cache.c b/fs/coda/cache.c
53738index 1da168c..8bc7ff6 100644
53739--- a/fs/coda/cache.c
53740+++ b/fs/coda/cache.c
53741@@ -24,7 +24,7 @@
53742 #include "coda_linux.h"
53743 #include "coda_cache.h"
53744
53745-static atomic_t permission_epoch = ATOMIC_INIT(0);
53746+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
53747
53748 /* replace or extend an acl cache hit */
53749 void coda_cache_enter(struct inode *inode, int mask)
53750@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
53751 struct coda_inode_info *cii = ITOC(inode);
53752
53753 spin_lock(&cii->c_lock);
53754- cii->c_cached_epoch = atomic_read(&permission_epoch);
53755+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
53756 if (!uid_eq(cii->c_uid, current_fsuid())) {
53757 cii->c_uid = current_fsuid();
53758 cii->c_cached_perm = mask;
53759@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
53760 {
53761 struct coda_inode_info *cii = ITOC(inode);
53762 spin_lock(&cii->c_lock);
53763- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
53764+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
53765 spin_unlock(&cii->c_lock);
53766 }
53767
53768 /* remove all acl caches */
53769 void coda_cache_clear_all(struct super_block *sb)
53770 {
53771- atomic_inc(&permission_epoch);
53772+ atomic_inc_unchecked(&permission_epoch);
53773 }
53774
53775
53776@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
53777 spin_lock(&cii->c_lock);
53778 hit = (mask & cii->c_cached_perm) == mask &&
53779 uid_eq(cii->c_uid, current_fsuid()) &&
53780- cii->c_cached_epoch == atomic_read(&permission_epoch);
53781+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
53782 spin_unlock(&cii->c_lock);
53783
53784 return hit;
53785diff --git a/fs/compat.c b/fs/compat.c
53786index fc3b55d..7b568ae 100644
53787--- a/fs/compat.c
53788+++ b/fs/compat.c
53789@@ -54,7 +54,7 @@
53790 #include <asm/ioctls.h>
53791 #include "internal.h"
53792
53793-int compat_log = 1;
53794+int compat_log = 0;
53795
53796 int compat_printk(const char *fmt, ...)
53797 {
53798@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
53799
53800 set_fs(KERNEL_DS);
53801 /* The __user pointer cast is valid because of the set_fs() */
53802- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
53803+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
53804 set_fs(oldfs);
53805 /* truncating is ok because it's a user address */
53806 if (!ret)
53807@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
53808 goto out;
53809
53810 ret = -EINVAL;
53811- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
53812+ if (nr_segs > UIO_MAXIOV)
53813 goto out;
53814 if (nr_segs > fast_segs) {
53815 ret = -ENOMEM;
53816@@ -833,6 +833,7 @@ struct compat_old_linux_dirent {
53817
53818 struct compat_readdir_callback {
53819 struct compat_old_linux_dirent __user *dirent;
53820+ struct file * file;
53821 int result;
53822 };
53823
53824@@ -850,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
53825 buf->result = -EOVERFLOW;
53826 return -EOVERFLOW;
53827 }
53828+
53829+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53830+ return 0;
53831+
53832 buf->result++;
53833 dirent = buf->dirent;
53834 if (!access_ok(VERIFY_WRITE, dirent,
53835@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
53836
53837 buf.result = 0;
53838 buf.dirent = dirent;
53839+ buf.file = f.file;
53840
53841 error = vfs_readdir(f.file, compat_fillonedir, &buf);
53842 if (buf.result)
53843@@ -899,6 +905,7 @@ struct compat_linux_dirent {
53844 struct compat_getdents_callback {
53845 struct compat_linux_dirent __user *current_dir;
53846 struct compat_linux_dirent __user *previous;
53847+ struct file * file;
53848 int count;
53849 int error;
53850 };
53851@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
53852 buf->error = -EOVERFLOW;
53853 return -EOVERFLOW;
53854 }
53855+
53856+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53857+ return 0;
53858+
53859 dirent = buf->previous;
53860 if (dirent) {
53861 if (__put_user(offset, &dirent->d_off))
53862@@ -965,6 +976,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
53863 buf.previous = NULL;
53864 buf.count = count;
53865 buf.error = 0;
53866+ buf.file = f.file;
53867
53868 error = vfs_readdir(f.file, compat_filldir, &buf);
53869 if (error >= 0)
53870@@ -985,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
53871 struct compat_getdents_callback64 {
53872 struct linux_dirent64 __user *current_dir;
53873 struct linux_dirent64 __user *previous;
53874+ struct file * file;
53875 int count;
53876 int error;
53877 };
53878@@ -1001,6 +1014,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
53879 buf->error = -EINVAL; /* only used if we fail.. */
53880 if (reclen > buf->count)
53881 return -EINVAL;
53882+
53883+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53884+ return 0;
53885+
53886 dirent = buf->previous;
53887
53888 if (dirent) {
53889@@ -1050,13 +1067,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
53890 buf.previous = NULL;
53891 buf.count = count;
53892 buf.error = 0;
53893+ buf.file = f.file;
53894
53895 error = vfs_readdir(f.file, compat_filldir64, &buf);
53896 if (error >= 0)
53897 error = buf.error;
53898 lastdirent = buf.previous;
53899 if (lastdirent) {
53900- typeof(lastdirent->d_off) d_off = f.file->f_pos;
53901+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
53902 if (__put_user_unaligned(d_off, &lastdirent->d_off))
53903 error = -EFAULT;
53904 else
53905diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
53906index a81147e..20bf2b5 100644
53907--- a/fs/compat_binfmt_elf.c
53908+++ b/fs/compat_binfmt_elf.c
53909@@ -30,11 +30,13 @@
53910 #undef elf_phdr
53911 #undef elf_shdr
53912 #undef elf_note
53913+#undef elf_dyn
53914 #undef elf_addr_t
53915 #define elfhdr elf32_hdr
53916 #define elf_phdr elf32_phdr
53917 #define elf_shdr elf32_shdr
53918 #define elf_note elf32_note
53919+#define elf_dyn Elf32_Dyn
53920 #define elf_addr_t Elf32_Addr
53921
53922 /*
53923diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
53924index 996cdc5..15e2f33 100644
53925--- a/fs/compat_ioctl.c
53926+++ b/fs/compat_ioctl.c
53927@@ -622,7 +622,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
53928 return -EFAULT;
53929 if (__get_user(udata, &ss32->iomem_base))
53930 return -EFAULT;
53931- ss.iomem_base = compat_ptr(udata);
53932+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
53933 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
53934 __get_user(ss.port_high, &ss32->port_high))
53935 return -EFAULT;
53936@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
53937 for (i = 0; i < nmsgs; i++) {
53938 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
53939 return -EFAULT;
53940- if (get_user(datap, &umsgs[i].buf) ||
53941- put_user(compat_ptr(datap), &tmsgs[i].buf))
53942+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
53943+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
53944 return -EFAULT;
53945 }
53946 return sys_ioctl(fd, cmd, (unsigned long)tdata);
53947@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
53948 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
53949 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
53950 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
53951- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
53952+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
53953 return -EFAULT;
53954
53955 return ioctl_preallocate(file, p);
53956@@ -1619,8 +1619,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
53957 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
53958 {
53959 unsigned int a, b;
53960- a = *(unsigned int *)p;
53961- b = *(unsigned int *)q;
53962+ a = *(const unsigned int *)p;
53963+ b = *(const unsigned int *)q;
53964 if (a > b)
53965 return 1;
53966 if (a < b)
53967diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
53968index 7aabc6a..34c1197 100644
53969--- a/fs/configfs/dir.c
53970+++ b/fs/configfs/dir.c
53971@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
53972 }
53973 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
53974 struct configfs_dirent *next;
53975- const char * name;
53976+ const unsigned char * name;
53977+ char d_name[sizeof(next->s_dentry->d_iname)];
53978 int len;
53979 struct inode *inode = NULL;
53980
53981@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
53982 continue;
53983
53984 name = configfs_get_name(next);
53985- len = strlen(name);
53986+ if (next->s_dentry && name == next->s_dentry->d_iname) {
53987+ len = next->s_dentry->d_name.len;
53988+ memcpy(d_name, name, len);
53989+ name = d_name;
53990+ } else
53991+ len = strlen(name);
53992
53993 /*
53994 * We'll have a dentry and an inode for
53995diff --git a/fs/coredump.c b/fs/coredump.c
53996index dafafba..10b3b27 100644
53997--- a/fs/coredump.c
53998+++ b/fs/coredump.c
53999@@ -52,7 +52,7 @@ struct core_name {
54000 char *corename;
54001 int used, size;
54002 };
54003-static atomic_t call_count = ATOMIC_INIT(1);
54004+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
54005
54006 /* The maximal length of core_pattern is also specified in sysctl.c */
54007
54008@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
54009 {
54010 char *old_corename = cn->corename;
54011
54012- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
54013+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
54014 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
54015
54016 if (!cn->corename) {
54017@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
54018 int pid_in_pattern = 0;
54019 int err = 0;
54020
54021- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
54022+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
54023 cn->corename = kmalloc(cn->size, GFP_KERNEL);
54024 cn->used = 0;
54025
54026@@ -435,8 +435,8 @@ static void wait_for_dump_helpers(struct file *file)
54027 struct pipe_inode_info *pipe = file->private_data;
54028
54029 pipe_lock(pipe);
54030- pipe->readers++;
54031- pipe->writers--;
54032+ atomic_inc(&pipe->readers);
54033+ atomic_dec(&pipe->writers);
54034 wake_up_interruptible_sync(&pipe->wait);
54035 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54036 pipe_unlock(pipe);
54037@@ -445,11 +445,11 @@ static void wait_for_dump_helpers(struct file *file)
54038 * We actually want wait_event_freezable() but then we need
54039 * to clear TIF_SIGPENDING and improve dump_interrupted().
54040 */
54041- wait_event_interruptible(pipe->wait, pipe->readers == 1);
54042+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
54043
54044 pipe_lock(pipe);
54045- pipe->readers--;
54046- pipe->writers++;
54047+ atomic_dec(&pipe->readers);
54048+ atomic_inc(&pipe->writers);
54049 pipe_unlock(pipe);
54050 }
54051
54052@@ -496,7 +496,8 @@ void do_coredump(siginfo_t *siginfo)
54053 struct files_struct *displaced;
54054 bool need_nonrelative = false;
54055 bool core_dumped = false;
54056- static atomic_t core_dump_count = ATOMIC_INIT(0);
54057+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
54058+ long signr = siginfo->si_signo;
54059 struct coredump_params cprm = {
54060 .siginfo = siginfo,
54061 .regs = signal_pt_regs(),
54062@@ -509,7 +510,10 @@ void do_coredump(siginfo_t *siginfo)
54063 .mm_flags = mm->flags,
54064 };
54065
54066- audit_core_dumps(siginfo->si_signo);
54067+ audit_core_dumps(signr);
54068+
54069+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
54070+ gr_handle_brute_attach(cprm.mm_flags);
54071
54072 binfmt = mm->binfmt;
54073 if (!binfmt || !binfmt->core_dump)
54074@@ -533,7 +537,7 @@ void do_coredump(siginfo_t *siginfo)
54075 need_nonrelative = true;
54076 }
54077
54078- retval = coredump_wait(siginfo->si_signo, &core_state);
54079+ retval = coredump_wait(signr, &core_state);
54080 if (retval < 0)
54081 goto fail_creds;
54082
54083@@ -576,7 +580,7 @@ void do_coredump(siginfo_t *siginfo)
54084 }
54085 cprm.limit = RLIM_INFINITY;
54086
54087- dump_count = atomic_inc_return(&core_dump_count);
54088+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
54089 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
54090 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
54091 task_tgid_vnr(current), current->comm);
54092@@ -608,6 +612,8 @@ void do_coredump(siginfo_t *siginfo)
54093 } else {
54094 struct inode *inode;
54095
54096+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
54097+
54098 if (cprm.limit < binfmt->min_coredump)
54099 goto fail_unlock;
54100
54101@@ -666,7 +672,7 @@ close_fail:
54102 filp_close(cprm.file, NULL);
54103 fail_dropcount:
54104 if (ispipe)
54105- atomic_dec(&core_dump_count);
54106+ atomic_dec_unchecked(&core_dump_count);
54107 fail_unlock:
54108 kfree(cn.corename);
54109 fail_corename:
54110@@ -687,7 +693,7 @@ int dump_write(struct file *file, const void *addr, int nr)
54111 {
54112 return !dump_interrupted() &&
54113 access_ok(VERIFY_READ, addr, nr) &&
54114- file->f_op->write(file, addr, nr, &file->f_pos) == nr;
54115+ file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
54116 }
54117 EXPORT_SYMBOL(dump_write);
54118
54119diff --git a/fs/dcache.c b/fs/dcache.c
54120index f09b908..04b9690 100644
54121--- a/fs/dcache.c
54122+++ b/fs/dcache.c
54123@@ -3086,7 +3086,8 @@ void __init vfs_caches_init(unsigned long mempages)
54124 mempages -= reserve;
54125
54126 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
54127- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
54128+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
54129+ SLAB_NO_SANITIZE, NULL);
54130
54131 dcache_init();
54132 inode_init();
54133diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
54134index c7c83ff..bda9461 100644
54135--- a/fs/debugfs/inode.c
54136+++ b/fs/debugfs/inode.c
54137@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
54138 */
54139 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
54140 {
54141+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54142+ return __create_file(name, S_IFDIR | S_IRWXU,
54143+#else
54144 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
54145+#endif
54146 parent, NULL, NULL);
54147 }
54148 EXPORT_SYMBOL_GPL(debugfs_create_dir);
54149diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
54150index 5eab400..810a3f5 100644
54151--- a/fs/ecryptfs/inode.c
54152+++ b/fs/ecryptfs/inode.c
54153@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
54154 old_fs = get_fs();
54155 set_fs(get_ds());
54156 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
54157- (char __user *)lower_buf,
54158+ (char __force_user *)lower_buf,
54159 PATH_MAX);
54160 set_fs(old_fs);
54161 if (rc < 0)
54162@@ -706,7 +706,7 @@ out:
54163 static void
54164 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
54165 {
54166- char *buf = nd_get_link(nd);
54167+ const char *buf = nd_get_link(nd);
54168 if (!IS_ERR(buf)) {
54169 /* Free the char* */
54170 kfree(buf);
54171diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
54172index e4141f2..d8263e8 100644
54173--- a/fs/ecryptfs/miscdev.c
54174+++ b/fs/ecryptfs/miscdev.c
54175@@ -304,7 +304,7 @@ check_list:
54176 goto out_unlock_msg_ctx;
54177 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
54178 if (msg_ctx->msg) {
54179- if (copy_to_user(&buf[i], packet_length, packet_length_size))
54180+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
54181 goto out_unlock_msg_ctx;
54182 i += packet_length_size;
54183 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
54184diff --git a/fs/exec.c b/fs/exec.c
54185index 1f44670..3c84660 100644
54186--- a/fs/exec.c
54187+++ b/fs/exec.c
54188@@ -55,8 +55,20 @@
54189 #include <linux/pipe_fs_i.h>
54190 #include <linux/oom.h>
54191 #include <linux/compat.h>
54192+#include <linux/random.h>
54193+#include <linux/seq_file.h>
54194+#include <linux/coredump.h>
54195+#include <linux/mman.h>
54196+
54197+#ifdef CONFIG_PAX_REFCOUNT
54198+#include <linux/kallsyms.h>
54199+#include <linux/kdebug.h>
54200+#endif
54201+
54202+#include <trace/events/fs.h>
54203
54204 #include <asm/uaccess.h>
54205+#include <asm/sections.h>
54206 #include <asm/mmu_context.h>
54207 #include <asm/tlb.h>
54208
54209@@ -66,17 +78,32 @@
54210
54211 #include <trace/events/sched.h>
54212
54213+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54214+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
54215+{
54216+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
54217+}
54218+#endif
54219+
54220+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
54221+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54222+EXPORT_SYMBOL(pax_set_initial_flags_func);
54223+#endif
54224+
54225 int suid_dumpable = 0;
54226
54227 static LIST_HEAD(formats);
54228 static DEFINE_RWLOCK(binfmt_lock);
54229
54230+extern int gr_process_kernel_exec_ban(void);
54231+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
54232+
54233 void __register_binfmt(struct linux_binfmt * fmt, int insert)
54234 {
54235 BUG_ON(!fmt);
54236 write_lock(&binfmt_lock);
54237- insert ? list_add(&fmt->lh, &formats) :
54238- list_add_tail(&fmt->lh, &formats);
54239+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
54240+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
54241 write_unlock(&binfmt_lock);
54242 }
54243
54244@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt);
54245 void unregister_binfmt(struct linux_binfmt * fmt)
54246 {
54247 write_lock(&binfmt_lock);
54248- list_del(&fmt->lh);
54249+ pax_list_del((struct list_head *)&fmt->lh);
54250 write_unlock(&binfmt_lock);
54251 }
54252
54253@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
54254 int write)
54255 {
54256 struct page *page;
54257- int ret;
54258
54259-#ifdef CONFIG_STACK_GROWSUP
54260- if (write) {
54261- ret = expand_downwards(bprm->vma, pos);
54262- if (ret < 0)
54263- return NULL;
54264- }
54265-#endif
54266- ret = get_user_pages(current, bprm->mm, pos,
54267- 1, write, 1, &page, NULL);
54268- if (ret <= 0)
54269+ if (0 > expand_downwards(bprm->vma, pos))
54270+ return NULL;
54271+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
54272 return NULL;
54273
54274 if (write) {
54275@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
54276 if (size <= ARG_MAX)
54277 return page;
54278
54279+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54280+ // only allow 512KB for argv+env on suid/sgid binaries
54281+ // to prevent easy ASLR exhaustion
54282+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
54283+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
54284+ (size > (512 * 1024))) {
54285+ put_page(page);
54286+ return NULL;
54287+ }
54288+#endif
54289+
54290 /*
54291 * Limit to 1/4-th the stack size for the argv+env strings.
54292 * This ensures that:
54293@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
54294 vma->vm_end = STACK_TOP_MAX;
54295 vma->vm_start = vma->vm_end - PAGE_SIZE;
54296 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
54297+
54298+#ifdef CONFIG_PAX_SEGMEXEC
54299+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
54300+#endif
54301+
54302 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
54303 INIT_LIST_HEAD(&vma->anon_vma_chain);
54304
54305@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
54306 mm->stack_vm = mm->total_vm = 1;
54307 up_write(&mm->mmap_sem);
54308 bprm->p = vma->vm_end - sizeof(void *);
54309+
54310+#ifdef CONFIG_PAX_RANDUSTACK
54311+ if (randomize_va_space)
54312+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
54313+#endif
54314+
54315 return 0;
54316 err:
54317 up_write(&mm->mmap_sem);
54318@@ -396,7 +437,7 @@ struct user_arg_ptr {
54319 } ptr;
54320 };
54321
54322-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54323+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54324 {
54325 const char __user *native;
54326
54327@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54328 compat_uptr_t compat;
54329
54330 if (get_user(compat, argv.ptr.compat + nr))
54331- return ERR_PTR(-EFAULT);
54332+ return (const char __force_user *)ERR_PTR(-EFAULT);
54333
54334 return compat_ptr(compat);
54335 }
54336 #endif
54337
54338 if (get_user(native, argv.ptr.native + nr))
54339- return ERR_PTR(-EFAULT);
54340+ return (const char __force_user *)ERR_PTR(-EFAULT);
54341
54342 return native;
54343 }
54344@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
54345 if (!p)
54346 break;
54347
54348- if (IS_ERR(p))
54349+ if (IS_ERR((const char __force_kernel *)p))
54350 return -EFAULT;
54351
54352 if (i >= max)
54353@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
54354
54355 ret = -EFAULT;
54356 str = get_user_arg_ptr(argv, argc);
54357- if (IS_ERR(str))
54358+ if (IS_ERR((const char __force_kernel *)str))
54359 goto out;
54360
54361 len = strnlen_user(str, MAX_ARG_STRLEN);
54362@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
54363 int r;
54364 mm_segment_t oldfs = get_fs();
54365 struct user_arg_ptr argv = {
54366- .ptr.native = (const char __user *const __user *)__argv,
54367+ .ptr.native = (const char __force_user * const __force_user *)__argv,
54368 };
54369
54370 set_fs(KERNEL_DS);
54371@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
54372 unsigned long new_end = old_end - shift;
54373 struct mmu_gather tlb;
54374
54375- BUG_ON(new_start > new_end);
54376+ if (new_start >= new_end || new_start < mmap_min_addr)
54377+ return -ENOMEM;
54378
54379 /*
54380 * ensure there are no vmas between where we want to go
54381@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
54382 if (vma != find_vma(mm, new_start))
54383 return -EFAULT;
54384
54385+#ifdef CONFIG_PAX_SEGMEXEC
54386+ BUG_ON(pax_find_mirror_vma(vma));
54387+#endif
54388+
54389 /*
54390 * cover the whole range: [new_start, old_end)
54391 */
54392@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
54393 stack_top = arch_align_stack(stack_top);
54394 stack_top = PAGE_ALIGN(stack_top);
54395
54396- if (unlikely(stack_top < mmap_min_addr) ||
54397- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
54398- return -ENOMEM;
54399-
54400 stack_shift = vma->vm_end - stack_top;
54401
54402 bprm->p -= stack_shift;
54403@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
54404 bprm->exec -= stack_shift;
54405
54406 down_write(&mm->mmap_sem);
54407+
54408+ /* Move stack pages down in memory. */
54409+ if (stack_shift) {
54410+ ret = shift_arg_pages(vma, stack_shift);
54411+ if (ret)
54412+ goto out_unlock;
54413+ }
54414+
54415 vm_flags = VM_STACK_FLAGS;
54416
54417+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
54418+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54419+ vm_flags &= ~VM_EXEC;
54420+
54421+#ifdef CONFIG_PAX_MPROTECT
54422+ if (mm->pax_flags & MF_PAX_MPROTECT)
54423+ vm_flags &= ~VM_MAYEXEC;
54424+#endif
54425+
54426+ }
54427+#endif
54428+
54429 /*
54430 * Adjust stack execute permissions; explicitly enable for
54431 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
54432@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
54433 goto out_unlock;
54434 BUG_ON(prev != vma);
54435
54436- /* Move stack pages down in memory. */
54437- if (stack_shift) {
54438- ret = shift_arg_pages(vma, stack_shift);
54439- if (ret)
54440- goto out_unlock;
54441- }
54442-
54443 /* mprotect_fixup is overkill to remove the temporary stack flags */
54444 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
54445
54446@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
54447 #endif
54448 current->mm->start_stack = bprm->p;
54449 ret = expand_stack(vma, stack_base);
54450+
54451+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
54452+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
54453+ unsigned long size;
54454+ vm_flags_t vm_flags;
54455+
54456+ size = STACK_TOP - vma->vm_end;
54457+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
54458+
54459+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
54460+
54461+#ifdef CONFIG_X86
54462+ if (!ret) {
54463+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
54464+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
54465+ }
54466+#endif
54467+
54468+ }
54469+#endif
54470+
54471 if (ret)
54472 ret = -EFAULT;
54473
54474@@ -772,6 +848,8 @@ struct file *open_exec(const char *name)
54475
54476 fsnotify_open(file);
54477
54478+ trace_open_exec(name);
54479+
54480 err = deny_write_access(file);
54481 if (err)
54482 goto exit;
54483@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset,
54484 old_fs = get_fs();
54485 set_fs(get_ds());
54486 /* The cast to a user pointer is valid due to the set_fs() */
54487- result = vfs_read(file, (void __user *)addr, count, &pos);
54488+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
54489 set_fs(old_fs);
54490 return result;
54491 }
54492@@ -1251,7 +1329,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
54493 }
54494 rcu_read_unlock();
54495
54496- if (p->fs->users > n_fs) {
54497+ if (atomic_read(&p->fs->users) > n_fs) {
54498 bprm->unsafe |= LSM_UNSAFE_SHARE;
54499 } else {
54500 res = -EAGAIN;
54501@@ -1451,6 +1529,31 @@ int search_binary_handler(struct linux_binprm *bprm)
54502
54503 EXPORT_SYMBOL(search_binary_handler);
54504
54505+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54506+static DEFINE_PER_CPU(u64, exec_counter);
54507+static int __init init_exec_counters(void)
54508+{
54509+ unsigned int cpu;
54510+
54511+ for_each_possible_cpu(cpu) {
54512+ per_cpu(exec_counter, cpu) = (u64)cpu;
54513+ }
54514+
54515+ return 0;
54516+}
54517+early_initcall(init_exec_counters);
54518+static inline void increment_exec_counter(void)
54519+{
54520+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
54521+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
54522+}
54523+#else
54524+static inline void increment_exec_counter(void) {}
54525+#endif
54526+
54527+extern void gr_handle_exec_args(struct linux_binprm *bprm,
54528+ struct user_arg_ptr argv);
54529+
54530 /*
54531 * sys_execve() executes a new program.
54532 */
54533@@ -1458,6 +1561,11 @@ static int do_execve_common(const char *filename,
54534 struct user_arg_ptr argv,
54535 struct user_arg_ptr envp)
54536 {
54537+#ifdef CONFIG_GRKERNSEC
54538+ struct file *old_exec_file;
54539+ struct acl_subject_label *old_acl;
54540+ struct rlimit old_rlim[RLIM_NLIMITS];
54541+#endif
54542 struct linux_binprm *bprm;
54543 struct file *file;
54544 struct files_struct *displaced;
54545@@ -1465,6 +1573,8 @@ static int do_execve_common(const char *filename,
54546 int retval;
54547 const struct cred *cred = current_cred();
54548
54549+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
54550+
54551 /*
54552 * We move the actual failure in case of RLIMIT_NPROC excess from
54553 * set*uid() to execve() because too many poorly written programs
54554@@ -1505,12 +1615,22 @@ static int do_execve_common(const char *filename,
54555 if (IS_ERR(file))
54556 goto out_unmark;
54557
54558+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
54559+ retval = -EPERM;
54560+ goto out_file;
54561+ }
54562+
54563 sched_exec();
54564
54565 bprm->file = file;
54566 bprm->filename = filename;
54567 bprm->interp = filename;
54568
54569+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
54570+ retval = -EACCES;
54571+ goto out_file;
54572+ }
54573+
54574 retval = bprm_mm_init(bprm);
54575 if (retval)
54576 goto out_file;
54577@@ -1527,24 +1647,70 @@ static int do_execve_common(const char *filename,
54578 if (retval < 0)
54579 goto out;
54580
54581+#ifdef CONFIG_GRKERNSEC
54582+ old_acl = current->acl;
54583+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
54584+ old_exec_file = current->exec_file;
54585+ get_file(file);
54586+ current->exec_file = file;
54587+#endif
54588+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54589+ /* limit suid stack to 8MB
54590+ * we saved the old limits above and will restore them if this exec fails
54591+ */
54592+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
54593+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
54594+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
54595+#endif
54596+
54597+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
54598+ retval = -EPERM;
54599+ goto out_fail;
54600+ }
54601+
54602+ if (!gr_tpe_allow(file)) {
54603+ retval = -EACCES;
54604+ goto out_fail;
54605+ }
54606+
54607+ if (gr_check_crash_exec(file)) {
54608+ retval = -EACCES;
54609+ goto out_fail;
54610+ }
54611+
54612+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
54613+ bprm->unsafe);
54614+ if (retval < 0)
54615+ goto out_fail;
54616+
54617 retval = copy_strings_kernel(1, &bprm->filename, bprm);
54618 if (retval < 0)
54619- goto out;
54620+ goto out_fail;
54621
54622 bprm->exec = bprm->p;
54623 retval = copy_strings(bprm->envc, envp, bprm);
54624 if (retval < 0)
54625- goto out;
54626+ goto out_fail;
54627
54628 retval = copy_strings(bprm->argc, argv, bprm);
54629 if (retval < 0)
54630- goto out;
54631+ goto out_fail;
54632+
54633+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
54634+
54635+ gr_handle_exec_args(bprm, argv);
54636
54637 retval = search_binary_handler(bprm);
54638 if (retval < 0)
54639- goto out;
54640+ goto out_fail;
54641+#ifdef CONFIG_GRKERNSEC
54642+ if (old_exec_file)
54643+ fput(old_exec_file);
54644+#endif
54645
54646 /* execve succeeded */
54647+
54648+ increment_exec_counter();
54649 current->fs->in_exec = 0;
54650 current->in_execve = 0;
54651 acct_update_integrals(current);
54652@@ -1553,6 +1719,14 @@ static int do_execve_common(const char *filename,
54653 put_files_struct(displaced);
54654 return retval;
54655
54656+out_fail:
54657+#ifdef CONFIG_GRKERNSEC
54658+ current->acl = old_acl;
54659+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
54660+ fput(current->exec_file);
54661+ current->exec_file = old_exec_file;
54662+#endif
54663+
54664 out:
54665 if (bprm->mm) {
54666 acct_arg_size(bprm, 0);
54667@@ -1701,3 +1875,287 @@ asmlinkage long compat_sys_execve(const char __user * filename,
54668 return error;
54669 }
54670 #endif
54671+
54672+int pax_check_flags(unsigned long *flags)
54673+{
54674+ int retval = 0;
54675+
54676+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
54677+ if (*flags & MF_PAX_SEGMEXEC)
54678+ {
54679+ *flags &= ~MF_PAX_SEGMEXEC;
54680+ retval = -EINVAL;
54681+ }
54682+#endif
54683+
54684+ if ((*flags & MF_PAX_PAGEEXEC)
54685+
54686+#ifdef CONFIG_PAX_PAGEEXEC
54687+ && (*flags & MF_PAX_SEGMEXEC)
54688+#endif
54689+
54690+ )
54691+ {
54692+ *flags &= ~MF_PAX_PAGEEXEC;
54693+ retval = -EINVAL;
54694+ }
54695+
54696+ if ((*flags & MF_PAX_MPROTECT)
54697+
54698+#ifdef CONFIG_PAX_MPROTECT
54699+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
54700+#endif
54701+
54702+ )
54703+ {
54704+ *flags &= ~MF_PAX_MPROTECT;
54705+ retval = -EINVAL;
54706+ }
54707+
54708+ if ((*flags & MF_PAX_EMUTRAMP)
54709+
54710+#ifdef CONFIG_PAX_EMUTRAMP
54711+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
54712+#endif
54713+
54714+ )
54715+ {
54716+ *flags &= ~MF_PAX_EMUTRAMP;
54717+ retval = -EINVAL;
54718+ }
54719+
54720+ return retval;
54721+}
54722+
54723+EXPORT_SYMBOL(pax_check_flags);
54724+
54725+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
54726+char *pax_get_path(const struct path *path, char *buf, int buflen)
54727+{
54728+ char *pathname = d_path(path, buf, buflen);
54729+
54730+ if (IS_ERR(pathname))
54731+ goto toolong;
54732+
54733+ pathname = mangle_path(buf, pathname, "\t\n\\");
54734+ if (!pathname)
54735+ goto toolong;
54736+
54737+ *pathname = 0;
54738+ return buf;
54739+
54740+toolong:
54741+ return "<path too long>";
54742+}
54743+EXPORT_SYMBOL(pax_get_path);
54744+
54745+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
54746+{
54747+ struct task_struct *tsk = current;
54748+ struct mm_struct *mm = current->mm;
54749+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
54750+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
54751+ char *path_exec = NULL;
54752+ char *path_fault = NULL;
54753+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
54754+ siginfo_t info = { };
54755+
54756+ if (buffer_exec && buffer_fault) {
54757+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
54758+
54759+ down_read(&mm->mmap_sem);
54760+ vma = mm->mmap;
54761+ while (vma && (!vma_exec || !vma_fault)) {
54762+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
54763+ vma_exec = vma;
54764+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
54765+ vma_fault = vma;
54766+ vma = vma->vm_next;
54767+ }
54768+ if (vma_exec)
54769+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
54770+ if (vma_fault) {
54771+ start = vma_fault->vm_start;
54772+ end = vma_fault->vm_end;
54773+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
54774+ if (vma_fault->vm_file)
54775+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
54776+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
54777+ path_fault = "<heap>";
54778+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
54779+ path_fault = "<stack>";
54780+ else
54781+ path_fault = "<anonymous mapping>";
54782+ }
54783+ up_read(&mm->mmap_sem);
54784+ }
54785+ if (tsk->signal->curr_ip)
54786+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
54787+ else
54788+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
54789+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
54790+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
54791+ free_page((unsigned long)buffer_exec);
54792+ free_page((unsigned long)buffer_fault);
54793+ pax_report_insns(regs, pc, sp);
54794+ info.si_signo = SIGKILL;
54795+ info.si_errno = 0;
54796+ info.si_code = SI_KERNEL;
54797+ info.si_pid = 0;
54798+ info.si_uid = 0;
54799+ do_coredump(&info);
54800+}
54801+#endif
54802+
54803+#ifdef CONFIG_PAX_REFCOUNT
54804+void pax_report_refcount_overflow(struct pt_regs *regs)
54805+{
54806+ if (current->signal->curr_ip)
54807+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
54808+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
54809+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
54810+ else
54811+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
54812+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
54813+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
54814+ preempt_disable();
54815+ show_regs(regs);
54816+ preempt_enable();
54817+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
54818+}
54819+#endif
54820+
54821+#ifdef CONFIG_PAX_USERCOPY
54822+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
54823+static noinline int check_stack_object(const void *obj, unsigned long len)
54824+{
54825+ const void * const stack = task_stack_page(current);
54826+ const void * const stackend = stack + THREAD_SIZE;
54827+
54828+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
54829+ const void *frame = NULL;
54830+ const void *oldframe;
54831+#endif
54832+
54833+ if (obj + len < obj)
54834+ return -1;
54835+
54836+ if (obj + len <= stack || stackend <= obj)
54837+ return 0;
54838+
54839+ if (obj < stack || stackend < obj + len)
54840+ return -1;
54841+
54842+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
54843+ oldframe = __builtin_frame_address(1);
54844+ if (oldframe)
54845+ frame = __builtin_frame_address(2);
54846+ /*
54847+ low ----------------------------------------------> high
54848+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
54849+ ^----------------^
54850+ allow copies only within here
54851+ */
54852+ while (stack <= frame && frame < stackend) {
54853+ /* if obj + len extends past the last frame, this
54854+ check won't pass and the next frame will be 0,
54855+ causing us to bail out and correctly report
54856+ the copy as invalid
54857+ */
54858+ if (obj + len <= frame)
54859+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
54860+ oldframe = frame;
54861+ frame = *(const void * const *)frame;
54862+ }
54863+ return -1;
54864+#else
54865+ return 1;
54866+#endif
54867+}
54868+
54869+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
54870+{
54871+ if (current->signal->curr_ip)
54872+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
54873+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
54874+ else
54875+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
54876+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
54877+ dump_stack();
54878+ gr_handle_kernel_exploit();
54879+ do_group_exit(SIGKILL);
54880+}
54881+#endif
54882+
54883+#ifdef CONFIG_PAX_USERCOPY
54884+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
54885+{
54886+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
54887+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
54888+#ifdef CONFIG_MODULES
54889+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
54890+#else
54891+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
54892+#endif
54893+
54894+#else
54895+ unsigned long textlow = (unsigned long)_stext;
54896+ unsigned long texthigh = (unsigned long)_etext;
54897+#endif
54898+
54899+ if (high <= textlow || low > texthigh)
54900+ return false;
54901+ else
54902+ return true;
54903+}
54904+#endif
54905+
54906+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
54907+{
54908+
54909+#ifdef CONFIG_PAX_USERCOPY
54910+ const char *type;
54911+
54912+ if (!n)
54913+ return;
54914+
54915+ type = check_heap_object(ptr, n);
54916+ if (!type) {
54917+ int ret = check_stack_object(ptr, n);
54918+ if (ret == 1 || ret == 2)
54919+ return;
54920+ if (ret == 0) {
54921+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
54922+ type = "<kernel text>";
54923+ else
54924+ return;
54925+ } else
54926+ type = "<process stack>";
54927+ }
54928+
54929+ pax_report_usercopy(ptr, n, to_user, type);
54930+#endif
54931+
54932+}
54933+EXPORT_SYMBOL(__check_object_size);
54934+
54935+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54936+void pax_track_stack(void)
54937+{
54938+ unsigned long sp = (unsigned long)&sp;
54939+ if (sp < current_thread_info()->lowest_stack &&
54940+ sp > (unsigned long)task_stack_page(current))
54941+ current_thread_info()->lowest_stack = sp;
54942+}
54943+EXPORT_SYMBOL(pax_track_stack);
54944+#endif
54945+
54946+#ifdef CONFIG_PAX_SIZE_OVERFLOW
54947+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
54948+{
54949+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
54950+ dump_stack();
54951+ do_group_exit(SIGKILL);
54952+}
54953+EXPORT_SYMBOL(report_size_overflow);
54954+#endif
54955diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
54956index 9f9992b..8b59411 100644
54957--- a/fs/ext2/balloc.c
54958+++ b/fs/ext2/balloc.c
54959@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
54960
54961 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
54962 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
54963- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
54964+ if (free_blocks < root_blocks + 1 &&
54965 !uid_eq(sbi->s_resuid, current_fsuid()) &&
54966 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
54967- !in_group_p (sbi->s_resgid))) {
54968+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
54969 return 0;
54970 }
54971 return 1;
54972diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
54973index 22548f5..41521d8 100644
54974--- a/fs/ext3/balloc.c
54975+++ b/fs/ext3/balloc.c
54976@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
54977
54978 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
54979 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
54980- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
54981+ if (free_blocks < root_blocks + 1 &&
54982 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
54983 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
54984- !in_group_p (sbi->s_resgid))) {
54985+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
54986 return 0;
54987 }
54988 return 1;
54989diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
54990index 3742e4c..69a797f 100644
54991--- a/fs/ext4/balloc.c
54992+++ b/fs/ext4/balloc.c
54993@@ -528,8 +528,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
54994 /* Hm, nope. Are (enough) root reserved clusters available? */
54995 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
54996 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
54997- capable(CAP_SYS_RESOURCE) ||
54998- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
54999+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
55000+ capable_nolog(CAP_SYS_RESOURCE)) {
55001
55002 if (free_clusters >= (nclusters + dirty_clusters +
55003 resv_clusters))
55004diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
55005index 5aae3d1..b5da7f8 100644
55006--- a/fs/ext4/ext4.h
55007+++ b/fs/ext4/ext4.h
55008@@ -1252,19 +1252,19 @@ struct ext4_sb_info {
55009 unsigned long s_mb_last_start;
55010
55011 /* stats for buddy allocator */
55012- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
55013- atomic_t s_bal_success; /* we found long enough chunks */
55014- atomic_t s_bal_allocated; /* in blocks */
55015- atomic_t s_bal_ex_scanned; /* total extents scanned */
55016- atomic_t s_bal_goals; /* goal hits */
55017- atomic_t s_bal_breaks; /* too long searches */
55018- atomic_t s_bal_2orders; /* 2^order hits */
55019+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
55020+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
55021+ atomic_unchecked_t s_bal_allocated; /* in blocks */
55022+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
55023+ atomic_unchecked_t s_bal_goals; /* goal hits */
55024+ atomic_unchecked_t s_bal_breaks; /* too long searches */
55025+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
55026 spinlock_t s_bal_lock;
55027 unsigned long s_mb_buddies_generated;
55028 unsigned long long s_mb_generation_time;
55029- atomic_t s_mb_lost_chunks;
55030- atomic_t s_mb_preallocated;
55031- atomic_t s_mb_discarded;
55032+ atomic_unchecked_t s_mb_lost_chunks;
55033+ atomic_unchecked_t s_mb_preallocated;
55034+ atomic_unchecked_t s_mb_discarded;
55035 atomic_t s_lock_busy;
55036
55037 /* locality groups */
55038diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
55039index 59c6750..a549154 100644
55040--- a/fs/ext4/mballoc.c
55041+++ b/fs/ext4/mballoc.c
55042@@ -1865,7 +1865,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
55043 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
55044
55045 if (EXT4_SB(sb)->s_mb_stats)
55046- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
55047+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
55048
55049 break;
55050 }
55051@@ -2170,7 +2170,7 @@ repeat:
55052 ac->ac_status = AC_STATUS_CONTINUE;
55053 ac->ac_flags |= EXT4_MB_HINT_FIRST;
55054 cr = 3;
55055- atomic_inc(&sbi->s_mb_lost_chunks);
55056+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
55057 goto repeat;
55058 }
55059 }
55060@@ -2678,25 +2678,25 @@ int ext4_mb_release(struct super_block *sb)
55061 if (sbi->s_mb_stats) {
55062 ext4_msg(sb, KERN_INFO,
55063 "mballoc: %u blocks %u reqs (%u success)",
55064- atomic_read(&sbi->s_bal_allocated),
55065- atomic_read(&sbi->s_bal_reqs),
55066- atomic_read(&sbi->s_bal_success));
55067+ atomic_read_unchecked(&sbi->s_bal_allocated),
55068+ atomic_read_unchecked(&sbi->s_bal_reqs),
55069+ atomic_read_unchecked(&sbi->s_bal_success));
55070 ext4_msg(sb, KERN_INFO,
55071 "mballoc: %u extents scanned, %u goal hits, "
55072 "%u 2^N hits, %u breaks, %u lost",
55073- atomic_read(&sbi->s_bal_ex_scanned),
55074- atomic_read(&sbi->s_bal_goals),
55075- atomic_read(&sbi->s_bal_2orders),
55076- atomic_read(&sbi->s_bal_breaks),
55077- atomic_read(&sbi->s_mb_lost_chunks));
55078+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
55079+ atomic_read_unchecked(&sbi->s_bal_goals),
55080+ atomic_read_unchecked(&sbi->s_bal_2orders),
55081+ atomic_read_unchecked(&sbi->s_bal_breaks),
55082+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
55083 ext4_msg(sb, KERN_INFO,
55084 "mballoc: %lu generated and it took %Lu",
55085 sbi->s_mb_buddies_generated,
55086 sbi->s_mb_generation_time);
55087 ext4_msg(sb, KERN_INFO,
55088 "mballoc: %u preallocated, %u discarded",
55089- atomic_read(&sbi->s_mb_preallocated),
55090- atomic_read(&sbi->s_mb_discarded));
55091+ atomic_read_unchecked(&sbi->s_mb_preallocated),
55092+ atomic_read_unchecked(&sbi->s_mb_discarded));
55093 }
55094
55095 free_percpu(sbi->s_locality_groups);
55096@@ -3150,16 +3150,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
55097 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
55098
55099 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
55100- atomic_inc(&sbi->s_bal_reqs);
55101- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
55102+ atomic_inc_unchecked(&sbi->s_bal_reqs);
55103+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
55104 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
55105- atomic_inc(&sbi->s_bal_success);
55106- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
55107+ atomic_inc_unchecked(&sbi->s_bal_success);
55108+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
55109 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
55110 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
55111- atomic_inc(&sbi->s_bal_goals);
55112+ atomic_inc_unchecked(&sbi->s_bal_goals);
55113 if (ac->ac_found > sbi->s_mb_max_to_scan)
55114- atomic_inc(&sbi->s_bal_breaks);
55115+ atomic_inc_unchecked(&sbi->s_bal_breaks);
55116 }
55117
55118 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
55119@@ -3559,7 +3559,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
55120 trace_ext4_mb_new_inode_pa(ac, pa);
55121
55122 ext4_mb_use_inode_pa(ac, pa);
55123- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
55124+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
55125
55126 ei = EXT4_I(ac->ac_inode);
55127 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
55128@@ -3619,7 +3619,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
55129 trace_ext4_mb_new_group_pa(ac, pa);
55130
55131 ext4_mb_use_group_pa(ac, pa);
55132- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
55133+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
55134
55135 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
55136 lg = ac->ac_lg;
55137@@ -3708,7 +3708,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
55138 * from the bitmap and continue.
55139 */
55140 }
55141- atomic_add(free, &sbi->s_mb_discarded);
55142+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
55143
55144 return err;
55145 }
55146@@ -3726,7 +3726,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
55147 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
55148 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
55149 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
55150- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
55151+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
55152 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
55153
55154 return 0;
55155diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
55156index 214461e..3614c89 100644
55157--- a/fs/ext4/mmp.c
55158+++ b/fs/ext4/mmp.c
55159@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
55160 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
55161 const char *function, unsigned int line, const char *msg)
55162 {
55163- __ext4_warning(sb, function, line, msg);
55164+ __ext4_warning(sb, function, line, "%s", msg);
55165 __ext4_warning(sb, function, line,
55166 "MMP failure info: last update time: %llu, last update "
55167 "node: %s, last update device: %s\n",
55168diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
55169index 49d3c01..9579efd 100644
55170--- a/fs/ext4/resize.c
55171+++ b/fs/ext4/resize.c
55172@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
55173 ext4_fsblk_t end = start + input->blocks_count;
55174 ext4_group_t group = input->group;
55175 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
55176- unsigned overhead = ext4_group_overhead_blocks(sb, group);
55177- ext4_fsblk_t metaend = start + overhead;
55178+ unsigned overhead;
55179+ ext4_fsblk_t metaend;
55180 struct buffer_head *bh = NULL;
55181 ext4_grpblk_t free_blocks_count, offset;
55182 int err = -EINVAL;
55183
55184+ if (group != sbi->s_groups_count) {
55185+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
55186+ input->group, sbi->s_groups_count);
55187+ return -EINVAL;
55188+ }
55189+
55190+ overhead = ext4_group_overhead_blocks(sb, group);
55191+ metaend = start + overhead;
55192 input->free_blocks_count = free_blocks_count =
55193 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
55194
55195@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
55196 free_blocks_count, input->reserved_blocks);
55197
55198 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
55199- if (group != sbi->s_groups_count)
55200- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
55201- input->group, sbi->s_groups_count);
55202- else if (offset != 0)
55203+ if (offset != 0)
55204 ext4_warning(sb, "Last group not full");
55205 else if (input->reserved_blocks > input->blocks_count / 5)
55206 ext4_warning(sb, "Reserved blocks too high (%u)",
55207diff --git a/fs/ext4/super.c b/fs/ext4/super.c
55208index 3f7c39e..227f24f 100644
55209--- a/fs/ext4/super.c
55210+++ b/fs/ext4/super.c
55211@@ -1236,7 +1236,7 @@ static ext4_fsblk_t get_sb_block(void **data)
55212 }
55213
55214 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
55215-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
55216+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
55217 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
55218
55219 #ifdef CONFIG_QUOTA
55220@@ -2372,7 +2372,7 @@ struct ext4_attr {
55221 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
55222 const char *, size_t);
55223 int offset;
55224-};
55225+} __do_const;
55226
55227 static int parse_strtoull(const char *buf,
55228 unsigned long long max, unsigned long long *value)
55229diff --git a/fs/fcntl.c b/fs/fcntl.c
55230index 6599222..e7bf0de 100644
55231--- a/fs/fcntl.c
55232+++ b/fs/fcntl.c
55233@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
55234 if (err)
55235 return err;
55236
55237+ if (gr_handle_chroot_fowner(pid, type))
55238+ return -ENOENT;
55239+ if (gr_check_protected_task_fowner(pid, type))
55240+ return -EACCES;
55241+
55242 f_modown(filp, pid, type, force);
55243 return 0;
55244 }
55245diff --git a/fs/fhandle.c b/fs/fhandle.c
55246index 999ff5c..41f4109 100644
55247--- a/fs/fhandle.c
55248+++ b/fs/fhandle.c
55249@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
55250 } else
55251 retval = 0;
55252 /* copy the mount id */
55253- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
55254- sizeof(*mnt_id)) ||
55255+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
55256 copy_to_user(ufh, handle,
55257 sizeof(struct file_handle) + handle_bytes))
55258 retval = -EFAULT;
55259diff --git a/fs/file.c b/fs/file.c
55260index 4a78f98..9447397 100644
55261--- a/fs/file.c
55262+++ b/fs/file.c
55263@@ -16,6 +16,7 @@
55264 #include <linux/slab.h>
55265 #include <linux/vmalloc.h>
55266 #include <linux/file.h>
55267+#include <linux/security.h>
55268 #include <linux/fdtable.h>
55269 #include <linux/bitops.h>
55270 #include <linux/interrupt.h>
55271@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
55272 if (!file)
55273 return __close_fd(files, fd);
55274
55275+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
55276 if (fd >= rlimit(RLIMIT_NOFILE))
55277 return -EBADF;
55278
55279@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
55280 if (unlikely(oldfd == newfd))
55281 return -EINVAL;
55282
55283+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
55284 if (newfd >= rlimit(RLIMIT_NOFILE))
55285 return -EBADF;
55286
55287@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
55288 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
55289 {
55290 int err;
55291+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
55292 if (from >= rlimit(RLIMIT_NOFILE))
55293 return -EINVAL;
55294 err = alloc_fd(from, flags);
55295diff --git a/fs/filesystems.c b/fs/filesystems.c
55296index 92567d9..fcd8cbf 100644
55297--- a/fs/filesystems.c
55298+++ b/fs/filesystems.c
55299@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
55300 int len = dot ? dot - name : strlen(name);
55301
55302 fs = __get_fs_type(name, len);
55303+#ifdef CONFIG_GRKERNSEC_MODHARDEN
55304+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
55305+#else
55306 if (!fs && (request_module("fs-%.*s", len, name) == 0))
55307+#endif
55308 fs = __get_fs_type(name, len);
55309
55310 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
55311diff --git a/fs/fs_struct.c b/fs/fs_struct.c
55312index d8ac61d..79a36f0 100644
55313--- a/fs/fs_struct.c
55314+++ b/fs/fs_struct.c
55315@@ -4,6 +4,7 @@
55316 #include <linux/path.h>
55317 #include <linux/slab.h>
55318 #include <linux/fs_struct.h>
55319+#include <linux/grsecurity.h>
55320 #include "internal.h"
55321
55322 /*
55323@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
55324 write_seqcount_begin(&fs->seq);
55325 old_root = fs->root;
55326 fs->root = *path;
55327+ gr_set_chroot_entries(current, path);
55328 write_seqcount_end(&fs->seq);
55329 spin_unlock(&fs->lock);
55330 if (old_root.dentry)
55331@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
55332 int hits = 0;
55333 spin_lock(&fs->lock);
55334 write_seqcount_begin(&fs->seq);
55335+ /* this root replacement is only done by pivot_root,
55336+ leave grsec's chroot tagging alone for this task
55337+ so that a pivoted root isn't treated as a chroot
55338+ */
55339 hits += replace_path(&fs->root, old_root, new_root);
55340 hits += replace_path(&fs->pwd, old_root, new_root);
55341 write_seqcount_end(&fs->seq);
55342@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
55343 task_lock(tsk);
55344 spin_lock(&fs->lock);
55345 tsk->fs = NULL;
55346- kill = !--fs->users;
55347+ gr_clear_chroot_entries(tsk);
55348+ kill = !atomic_dec_return(&fs->users);
55349 spin_unlock(&fs->lock);
55350 task_unlock(tsk);
55351 if (kill)
55352@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
55353 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
55354 /* We don't need to lock fs - think why ;-) */
55355 if (fs) {
55356- fs->users = 1;
55357+ atomic_set(&fs->users, 1);
55358 fs->in_exec = 0;
55359 spin_lock_init(&fs->lock);
55360 seqcount_init(&fs->seq);
55361@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
55362 spin_lock(&old->lock);
55363 fs->root = old->root;
55364 path_get(&fs->root);
55365+ /* instead of calling gr_set_chroot_entries here,
55366+ we call it from every caller of this function
55367+ */
55368 fs->pwd = old->pwd;
55369 path_get(&fs->pwd);
55370 spin_unlock(&old->lock);
55371@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
55372
55373 task_lock(current);
55374 spin_lock(&fs->lock);
55375- kill = !--fs->users;
55376+ kill = !atomic_dec_return(&fs->users);
55377 current->fs = new_fs;
55378+ gr_set_chroot_entries(current, &new_fs->root);
55379 spin_unlock(&fs->lock);
55380 task_unlock(current);
55381
55382@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
55383
55384 int current_umask(void)
55385 {
55386- return current->fs->umask;
55387+ return current->fs->umask | gr_acl_umask();
55388 }
55389 EXPORT_SYMBOL(current_umask);
55390
55391 /* to be mentioned only in INIT_TASK */
55392 struct fs_struct init_fs = {
55393- .users = 1,
55394+ .users = ATOMIC_INIT(1),
55395 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
55396 .seq = SEQCNT_ZERO,
55397 .umask = 0022,
55398diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
55399index e2cba1f..17a25bb 100644
55400--- a/fs/fscache/cookie.c
55401+++ b/fs/fscache/cookie.c
55402@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
55403 parent ? (char *) parent->def->name : "<no-parent>",
55404 def->name, netfs_data);
55405
55406- fscache_stat(&fscache_n_acquires);
55407+ fscache_stat_unchecked(&fscache_n_acquires);
55408
55409 /* if there's no parent cookie, then we don't create one here either */
55410 if (!parent) {
55411- fscache_stat(&fscache_n_acquires_null);
55412+ fscache_stat_unchecked(&fscache_n_acquires_null);
55413 _leave(" [no parent]");
55414 return NULL;
55415 }
55416@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
55417 /* allocate and initialise a cookie */
55418 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
55419 if (!cookie) {
55420- fscache_stat(&fscache_n_acquires_oom);
55421+ fscache_stat_unchecked(&fscache_n_acquires_oom);
55422 _leave(" [ENOMEM]");
55423 return NULL;
55424 }
55425@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
55426
55427 switch (cookie->def->type) {
55428 case FSCACHE_COOKIE_TYPE_INDEX:
55429- fscache_stat(&fscache_n_cookie_index);
55430+ fscache_stat_unchecked(&fscache_n_cookie_index);
55431 break;
55432 case FSCACHE_COOKIE_TYPE_DATAFILE:
55433- fscache_stat(&fscache_n_cookie_data);
55434+ fscache_stat_unchecked(&fscache_n_cookie_data);
55435 break;
55436 default:
55437- fscache_stat(&fscache_n_cookie_special);
55438+ fscache_stat_unchecked(&fscache_n_cookie_special);
55439 break;
55440 }
55441
55442@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
55443 if (fscache_acquire_non_index_cookie(cookie) < 0) {
55444 atomic_dec(&parent->n_children);
55445 __fscache_cookie_put(cookie);
55446- fscache_stat(&fscache_n_acquires_nobufs);
55447+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
55448 _leave(" = NULL");
55449 return NULL;
55450 }
55451 }
55452
55453- fscache_stat(&fscache_n_acquires_ok);
55454+ fscache_stat_unchecked(&fscache_n_acquires_ok);
55455 _leave(" = %p", cookie);
55456 return cookie;
55457 }
55458@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
55459 cache = fscache_select_cache_for_object(cookie->parent);
55460 if (!cache) {
55461 up_read(&fscache_addremove_sem);
55462- fscache_stat(&fscache_n_acquires_no_cache);
55463+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
55464 _leave(" = -ENOMEDIUM [no cache]");
55465 return -ENOMEDIUM;
55466 }
55467@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
55468 object = cache->ops->alloc_object(cache, cookie);
55469 fscache_stat_d(&fscache_n_cop_alloc_object);
55470 if (IS_ERR(object)) {
55471- fscache_stat(&fscache_n_object_no_alloc);
55472+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
55473 ret = PTR_ERR(object);
55474 goto error;
55475 }
55476
55477- fscache_stat(&fscache_n_object_alloc);
55478+ fscache_stat_unchecked(&fscache_n_object_alloc);
55479
55480 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
55481
55482@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
55483
55484 _enter("{%s}", cookie->def->name);
55485
55486- fscache_stat(&fscache_n_invalidates);
55487+ fscache_stat_unchecked(&fscache_n_invalidates);
55488
55489 /* Only permit invalidation of data files. Invalidating an index will
55490 * require the caller to release all its attachments to the tree rooted
55491@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
55492 {
55493 struct fscache_object *object;
55494
55495- fscache_stat(&fscache_n_updates);
55496+ fscache_stat_unchecked(&fscache_n_updates);
55497
55498 if (!cookie) {
55499- fscache_stat(&fscache_n_updates_null);
55500+ fscache_stat_unchecked(&fscache_n_updates_null);
55501 _leave(" [no cookie]");
55502 return;
55503 }
55504@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
55505 struct fscache_object *object;
55506 unsigned long event;
55507
55508- fscache_stat(&fscache_n_relinquishes);
55509+ fscache_stat_unchecked(&fscache_n_relinquishes);
55510 if (retire)
55511- fscache_stat(&fscache_n_relinquishes_retire);
55512+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
55513
55514 if (!cookie) {
55515- fscache_stat(&fscache_n_relinquishes_null);
55516+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
55517 _leave(" [no cookie]");
55518 return;
55519 }
55520@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
55521
55522 /* wait for the cookie to finish being instantiated (or to fail) */
55523 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
55524- fscache_stat(&fscache_n_relinquishes_waitcrt);
55525+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
55526 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
55527 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
55528 }
55529diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
55530index ee38fef..0a326d4 100644
55531--- a/fs/fscache/internal.h
55532+++ b/fs/fscache/internal.h
55533@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
55534 * stats.c
55535 */
55536 #ifdef CONFIG_FSCACHE_STATS
55537-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
55538-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
55539+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
55540+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
55541
55542-extern atomic_t fscache_n_op_pend;
55543-extern atomic_t fscache_n_op_run;
55544-extern atomic_t fscache_n_op_enqueue;
55545-extern atomic_t fscache_n_op_deferred_release;
55546-extern atomic_t fscache_n_op_release;
55547-extern atomic_t fscache_n_op_gc;
55548-extern atomic_t fscache_n_op_cancelled;
55549-extern atomic_t fscache_n_op_rejected;
55550+extern atomic_unchecked_t fscache_n_op_pend;
55551+extern atomic_unchecked_t fscache_n_op_run;
55552+extern atomic_unchecked_t fscache_n_op_enqueue;
55553+extern atomic_unchecked_t fscache_n_op_deferred_release;
55554+extern atomic_unchecked_t fscache_n_op_release;
55555+extern atomic_unchecked_t fscache_n_op_gc;
55556+extern atomic_unchecked_t fscache_n_op_cancelled;
55557+extern atomic_unchecked_t fscache_n_op_rejected;
55558
55559-extern atomic_t fscache_n_attr_changed;
55560-extern atomic_t fscache_n_attr_changed_ok;
55561-extern atomic_t fscache_n_attr_changed_nobufs;
55562-extern atomic_t fscache_n_attr_changed_nomem;
55563-extern atomic_t fscache_n_attr_changed_calls;
55564+extern atomic_unchecked_t fscache_n_attr_changed;
55565+extern atomic_unchecked_t fscache_n_attr_changed_ok;
55566+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
55567+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
55568+extern atomic_unchecked_t fscache_n_attr_changed_calls;
55569
55570-extern atomic_t fscache_n_allocs;
55571-extern atomic_t fscache_n_allocs_ok;
55572-extern atomic_t fscache_n_allocs_wait;
55573-extern atomic_t fscache_n_allocs_nobufs;
55574-extern atomic_t fscache_n_allocs_intr;
55575-extern atomic_t fscache_n_allocs_object_dead;
55576-extern atomic_t fscache_n_alloc_ops;
55577-extern atomic_t fscache_n_alloc_op_waits;
55578+extern atomic_unchecked_t fscache_n_allocs;
55579+extern atomic_unchecked_t fscache_n_allocs_ok;
55580+extern atomic_unchecked_t fscache_n_allocs_wait;
55581+extern atomic_unchecked_t fscache_n_allocs_nobufs;
55582+extern atomic_unchecked_t fscache_n_allocs_intr;
55583+extern atomic_unchecked_t fscache_n_allocs_object_dead;
55584+extern atomic_unchecked_t fscache_n_alloc_ops;
55585+extern atomic_unchecked_t fscache_n_alloc_op_waits;
55586
55587-extern atomic_t fscache_n_retrievals;
55588-extern atomic_t fscache_n_retrievals_ok;
55589-extern atomic_t fscache_n_retrievals_wait;
55590-extern atomic_t fscache_n_retrievals_nodata;
55591-extern atomic_t fscache_n_retrievals_nobufs;
55592-extern atomic_t fscache_n_retrievals_intr;
55593-extern atomic_t fscache_n_retrievals_nomem;
55594-extern atomic_t fscache_n_retrievals_object_dead;
55595-extern atomic_t fscache_n_retrieval_ops;
55596-extern atomic_t fscache_n_retrieval_op_waits;
55597+extern atomic_unchecked_t fscache_n_retrievals;
55598+extern atomic_unchecked_t fscache_n_retrievals_ok;
55599+extern atomic_unchecked_t fscache_n_retrievals_wait;
55600+extern atomic_unchecked_t fscache_n_retrievals_nodata;
55601+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
55602+extern atomic_unchecked_t fscache_n_retrievals_intr;
55603+extern atomic_unchecked_t fscache_n_retrievals_nomem;
55604+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
55605+extern atomic_unchecked_t fscache_n_retrieval_ops;
55606+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
55607
55608-extern atomic_t fscache_n_stores;
55609-extern atomic_t fscache_n_stores_ok;
55610-extern atomic_t fscache_n_stores_again;
55611-extern atomic_t fscache_n_stores_nobufs;
55612-extern atomic_t fscache_n_stores_oom;
55613-extern atomic_t fscache_n_store_ops;
55614-extern atomic_t fscache_n_store_calls;
55615-extern atomic_t fscache_n_store_pages;
55616-extern atomic_t fscache_n_store_radix_deletes;
55617-extern atomic_t fscache_n_store_pages_over_limit;
55618+extern atomic_unchecked_t fscache_n_stores;
55619+extern atomic_unchecked_t fscache_n_stores_ok;
55620+extern atomic_unchecked_t fscache_n_stores_again;
55621+extern atomic_unchecked_t fscache_n_stores_nobufs;
55622+extern atomic_unchecked_t fscache_n_stores_oom;
55623+extern atomic_unchecked_t fscache_n_store_ops;
55624+extern atomic_unchecked_t fscache_n_store_calls;
55625+extern atomic_unchecked_t fscache_n_store_pages;
55626+extern atomic_unchecked_t fscache_n_store_radix_deletes;
55627+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
55628
55629-extern atomic_t fscache_n_store_vmscan_not_storing;
55630-extern atomic_t fscache_n_store_vmscan_gone;
55631-extern atomic_t fscache_n_store_vmscan_busy;
55632-extern atomic_t fscache_n_store_vmscan_cancelled;
55633-extern atomic_t fscache_n_store_vmscan_wait;
55634+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
55635+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
55636+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
55637+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
55638+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
55639
55640-extern atomic_t fscache_n_marks;
55641-extern atomic_t fscache_n_uncaches;
55642+extern atomic_unchecked_t fscache_n_marks;
55643+extern atomic_unchecked_t fscache_n_uncaches;
55644
55645-extern atomic_t fscache_n_acquires;
55646-extern atomic_t fscache_n_acquires_null;
55647-extern atomic_t fscache_n_acquires_no_cache;
55648-extern atomic_t fscache_n_acquires_ok;
55649-extern atomic_t fscache_n_acquires_nobufs;
55650-extern atomic_t fscache_n_acquires_oom;
55651+extern atomic_unchecked_t fscache_n_acquires;
55652+extern atomic_unchecked_t fscache_n_acquires_null;
55653+extern atomic_unchecked_t fscache_n_acquires_no_cache;
55654+extern atomic_unchecked_t fscache_n_acquires_ok;
55655+extern atomic_unchecked_t fscache_n_acquires_nobufs;
55656+extern atomic_unchecked_t fscache_n_acquires_oom;
55657
55658-extern atomic_t fscache_n_invalidates;
55659-extern atomic_t fscache_n_invalidates_run;
55660+extern atomic_unchecked_t fscache_n_invalidates;
55661+extern atomic_unchecked_t fscache_n_invalidates_run;
55662
55663-extern atomic_t fscache_n_updates;
55664-extern atomic_t fscache_n_updates_null;
55665-extern atomic_t fscache_n_updates_run;
55666+extern atomic_unchecked_t fscache_n_updates;
55667+extern atomic_unchecked_t fscache_n_updates_null;
55668+extern atomic_unchecked_t fscache_n_updates_run;
55669
55670-extern atomic_t fscache_n_relinquishes;
55671-extern atomic_t fscache_n_relinquishes_null;
55672-extern atomic_t fscache_n_relinquishes_waitcrt;
55673-extern atomic_t fscache_n_relinquishes_retire;
55674+extern atomic_unchecked_t fscache_n_relinquishes;
55675+extern atomic_unchecked_t fscache_n_relinquishes_null;
55676+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
55677+extern atomic_unchecked_t fscache_n_relinquishes_retire;
55678
55679-extern atomic_t fscache_n_cookie_index;
55680-extern atomic_t fscache_n_cookie_data;
55681-extern atomic_t fscache_n_cookie_special;
55682+extern atomic_unchecked_t fscache_n_cookie_index;
55683+extern atomic_unchecked_t fscache_n_cookie_data;
55684+extern atomic_unchecked_t fscache_n_cookie_special;
55685
55686-extern atomic_t fscache_n_object_alloc;
55687-extern atomic_t fscache_n_object_no_alloc;
55688-extern atomic_t fscache_n_object_lookups;
55689-extern atomic_t fscache_n_object_lookups_negative;
55690-extern atomic_t fscache_n_object_lookups_positive;
55691-extern atomic_t fscache_n_object_lookups_timed_out;
55692-extern atomic_t fscache_n_object_created;
55693-extern atomic_t fscache_n_object_avail;
55694-extern atomic_t fscache_n_object_dead;
55695+extern atomic_unchecked_t fscache_n_object_alloc;
55696+extern atomic_unchecked_t fscache_n_object_no_alloc;
55697+extern atomic_unchecked_t fscache_n_object_lookups;
55698+extern atomic_unchecked_t fscache_n_object_lookups_negative;
55699+extern atomic_unchecked_t fscache_n_object_lookups_positive;
55700+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
55701+extern atomic_unchecked_t fscache_n_object_created;
55702+extern atomic_unchecked_t fscache_n_object_avail;
55703+extern atomic_unchecked_t fscache_n_object_dead;
55704
55705-extern atomic_t fscache_n_checkaux_none;
55706-extern atomic_t fscache_n_checkaux_okay;
55707-extern atomic_t fscache_n_checkaux_update;
55708-extern atomic_t fscache_n_checkaux_obsolete;
55709+extern atomic_unchecked_t fscache_n_checkaux_none;
55710+extern atomic_unchecked_t fscache_n_checkaux_okay;
55711+extern atomic_unchecked_t fscache_n_checkaux_update;
55712+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
55713
55714 extern atomic_t fscache_n_cop_alloc_object;
55715 extern atomic_t fscache_n_cop_lookup_object;
55716@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
55717 atomic_inc(stat);
55718 }
55719
55720+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
55721+{
55722+ atomic_inc_unchecked(stat);
55723+}
55724+
55725 static inline void fscache_stat_d(atomic_t *stat)
55726 {
55727 atomic_dec(stat);
55728@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
55729
55730 #define __fscache_stat(stat) (NULL)
55731 #define fscache_stat(stat) do {} while (0)
55732+#define fscache_stat_unchecked(stat) do {} while (0)
55733 #define fscache_stat_d(stat) do {} while (0)
55734 #endif
55735
55736diff --git a/fs/fscache/object.c b/fs/fscache/object.c
55737index 50d41c1..10ee117 100644
55738--- a/fs/fscache/object.c
55739+++ b/fs/fscache/object.c
55740@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55741 /* Invalidate an object on disk */
55742 case FSCACHE_OBJECT_INVALIDATING:
55743 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
55744- fscache_stat(&fscache_n_invalidates_run);
55745+ fscache_stat_unchecked(&fscache_n_invalidates_run);
55746 fscache_stat(&fscache_n_cop_invalidate_object);
55747 fscache_invalidate_object(object);
55748 fscache_stat_d(&fscache_n_cop_invalidate_object);
55749@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55750 /* update the object metadata on disk */
55751 case FSCACHE_OBJECT_UPDATING:
55752 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
55753- fscache_stat(&fscache_n_updates_run);
55754+ fscache_stat_unchecked(&fscache_n_updates_run);
55755 fscache_stat(&fscache_n_cop_update_object);
55756 object->cache->ops->update_object(object);
55757 fscache_stat_d(&fscache_n_cop_update_object);
55758@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55759 spin_lock(&object->lock);
55760 object->state = FSCACHE_OBJECT_DEAD;
55761 spin_unlock(&object->lock);
55762- fscache_stat(&fscache_n_object_dead);
55763+ fscache_stat_unchecked(&fscache_n_object_dead);
55764 goto terminal_transit;
55765
55766 /* handle the parent cache of this object being withdrawn from
55767@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55768 spin_lock(&object->lock);
55769 object->state = FSCACHE_OBJECT_DEAD;
55770 spin_unlock(&object->lock);
55771- fscache_stat(&fscache_n_object_dead);
55772+ fscache_stat_unchecked(&fscache_n_object_dead);
55773 goto terminal_transit;
55774
55775 /* complain about the object being woken up once it is
55776@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
55777 parent->cookie->def->name, cookie->def->name,
55778 object->cache->tag->name);
55779
55780- fscache_stat(&fscache_n_object_lookups);
55781+ fscache_stat_unchecked(&fscache_n_object_lookups);
55782 fscache_stat(&fscache_n_cop_lookup_object);
55783 ret = object->cache->ops->lookup_object(object);
55784 fscache_stat_d(&fscache_n_cop_lookup_object);
55785@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
55786 if (ret == -ETIMEDOUT) {
55787 /* probably stuck behind another object, so move this one to
55788 * the back of the queue */
55789- fscache_stat(&fscache_n_object_lookups_timed_out);
55790+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
55791 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
55792 }
55793
55794@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
55795
55796 spin_lock(&object->lock);
55797 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
55798- fscache_stat(&fscache_n_object_lookups_negative);
55799+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
55800
55801 /* transit here to allow write requests to begin stacking up
55802 * and read requests to begin returning ENODATA */
55803@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
55804 * result, in which case there may be data available */
55805 spin_lock(&object->lock);
55806 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
55807- fscache_stat(&fscache_n_object_lookups_positive);
55808+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
55809
55810 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
55811
55812@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
55813 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
55814 } else {
55815 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
55816- fscache_stat(&fscache_n_object_created);
55817+ fscache_stat_unchecked(&fscache_n_object_created);
55818
55819 object->state = FSCACHE_OBJECT_AVAILABLE;
55820 spin_unlock(&object->lock);
55821@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
55822 fscache_enqueue_dependents(object);
55823
55824 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
55825- fscache_stat(&fscache_n_object_avail);
55826+ fscache_stat_unchecked(&fscache_n_object_avail);
55827
55828 _leave("");
55829 }
55830@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
55831 enum fscache_checkaux result;
55832
55833 if (!object->cookie->def->check_aux) {
55834- fscache_stat(&fscache_n_checkaux_none);
55835+ fscache_stat_unchecked(&fscache_n_checkaux_none);
55836 return FSCACHE_CHECKAUX_OKAY;
55837 }
55838
55839@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
55840 switch (result) {
55841 /* entry okay as is */
55842 case FSCACHE_CHECKAUX_OKAY:
55843- fscache_stat(&fscache_n_checkaux_okay);
55844+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
55845 break;
55846
55847 /* entry requires update */
55848 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
55849- fscache_stat(&fscache_n_checkaux_update);
55850+ fscache_stat_unchecked(&fscache_n_checkaux_update);
55851 break;
55852
55853 /* entry requires deletion */
55854 case FSCACHE_CHECKAUX_OBSOLETE:
55855- fscache_stat(&fscache_n_checkaux_obsolete);
55856+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
55857 break;
55858
55859 default:
55860diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
55861index 762a9ec..2023284 100644
55862--- a/fs/fscache/operation.c
55863+++ b/fs/fscache/operation.c
55864@@ -17,7 +17,7 @@
55865 #include <linux/slab.h>
55866 #include "internal.h"
55867
55868-atomic_t fscache_op_debug_id;
55869+atomic_unchecked_t fscache_op_debug_id;
55870 EXPORT_SYMBOL(fscache_op_debug_id);
55871
55872 /**
55873@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
55874 ASSERTCMP(atomic_read(&op->usage), >, 0);
55875 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
55876
55877- fscache_stat(&fscache_n_op_enqueue);
55878+ fscache_stat_unchecked(&fscache_n_op_enqueue);
55879 switch (op->flags & FSCACHE_OP_TYPE) {
55880 case FSCACHE_OP_ASYNC:
55881 _debug("queue async");
55882@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
55883 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
55884 if (op->processor)
55885 fscache_enqueue_operation(op);
55886- fscache_stat(&fscache_n_op_run);
55887+ fscache_stat_unchecked(&fscache_n_op_run);
55888 }
55889
55890 /*
55891@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
55892 if (object->n_in_progress > 0) {
55893 atomic_inc(&op->usage);
55894 list_add_tail(&op->pend_link, &object->pending_ops);
55895- fscache_stat(&fscache_n_op_pend);
55896+ fscache_stat_unchecked(&fscache_n_op_pend);
55897 } else if (!list_empty(&object->pending_ops)) {
55898 atomic_inc(&op->usage);
55899 list_add_tail(&op->pend_link, &object->pending_ops);
55900- fscache_stat(&fscache_n_op_pend);
55901+ fscache_stat_unchecked(&fscache_n_op_pend);
55902 fscache_start_operations(object);
55903 } else {
55904 ASSERTCMP(object->n_in_progress, ==, 0);
55905@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
55906 object->n_exclusive++; /* reads and writes must wait */
55907 atomic_inc(&op->usage);
55908 list_add_tail(&op->pend_link, &object->pending_ops);
55909- fscache_stat(&fscache_n_op_pend);
55910+ fscache_stat_unchecked(&fscache_n_op_pend);
55911 ret = 0;
55912 } else {
55913 /* If we're in any other state, there must have been an I/O
55914@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
55915 if (object->n_exclusive > 0) {
55916 atomic_inc(&op->usage);
55917 list_add_tail(&op->pend_link, &object->pending_ops);
55918- fscache_stat(&fscache_n_op_pend);
55919+ fscache_stat_unchecked(&fscache_n_op_pend);
55920 } else if (!list_empty(&object->pending_ops)) {
55921 atomic_inc(&op->usage);
55922 list_add_tail(&op->pend_link, &object->pending_ops);
55923- fscache_stat(&fscache_n_op_pend);
55924+ fscache_stat_unchecked(&fscache_n_op_pend);
55925 fscache_start_operations(object);
55926 } else {
55927 ASSERTCMP(object->n_exclusive, ==, 0);
55928@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
55929 object->n_ops++;
55930 atomic_inc(&op->usage);
55931 list_add_tail(&op->pend_link, &object->pending_ops);
55932- fscache_stat(&fscache_n_op_pend);
55933+ fscache_stat_unchecked(&fscache_n_op_pend);
55934 ret = 0;
55935 } else if (object->state == FSCACHE_OBJECT_DYING ||
55936 object->state == FSCACHE_OBJECT_LC_DYING ||
55937 object->state == FSCACHE_OBJECT_WITHDRAWING) {
55938- fscache_stat(&fscache_n_op_rejected);
55939+ fscache_stat_unchecked(&fscache_n_op_rejected);
55940 op->state = FSCACHE_OP_ST_CANCELLED;
55941 ret = -ENOBUFS;
55942 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
55943@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
55944 ret = -EBUSY;
55945 if (op->state == FSCACHE_OP_ST_PENDING) {
55946 ASSERT(!list_empty(&op->pend_link));
55947- fscache_stat(&fscache_n_op_cancelled);
55948+ fscache_stat_unchecked(&fscache_n_op_cancelled);
55949 list_del_init(&op->pend_link);
55950 if (do_cancel)
55951 do_cancel(op);
55952@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
55953 while (!list_empty(&object->pending_ops)) {
55954 op = list_entry(object->pending_ops.next,
55955 struct fscache_operation, pend_link);
55956- fscache_stat(&fscache_n_op_cancelled);
55957+ fscache_stat_unchecked(&fscache_n_op_cancelled);
55958 list_del_init(&op->pend_link);
55959
55960 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
55961@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
55962 op->state, ==, FSCACHE_OP_ST_CANCELLED);
55963 op->state = FSCACHE_OP_ST_DEAD;
55964
55965- fscache_stat(&fscache_n_op_release);
55966+ fscache_stat_unchecked(&fscache_n_op_release);
55967
55968 if (op->release) {
55969 op->release(op);
55970@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
55971 * lock, and defer it otherwise */
55972 if (!spin_trylock(&object->lock)) {
55973 _debug("defer put");
55974- fscache_stat(&fscache_n_op_deferred_release);
55975+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
55976
55977 cache = object->cache;
55978 spin_lock(&cache->op_gc_list_lock);
55979@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
55980
55981 _debug("GC DEFERRED REL OBJ%x OP%x",
55982 object->debug_id, op->debug_id);
55983- fscache_stat(&fscache_n_op_gc);
55984+ fscache_stat_unchecked(&fscache_n_op_gc);
55985
55986 ASSERTCMP(atomic_read(&op->usage), ==, 0);
55987 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
55988diff --git a/fs/fscache/page.c b/fs/fscache/page.c
55989index ff000e5..c44ec6d 100644
55990--- a/fs/fscache/page.c
55991+++ b/fs/fscache/page.c
55992@@ -61,7 +61,7 @@ try_again:
55993 val = radix_tree_lookup(&cookie->stores, page->index);
55994 if (!val) {
55995 rcu_read_unlock();
55996- fscache_stat(&fscache_n_store_vmscan_not_storing);
55997+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
55998 __fscache_uncache_page(cookie, page);
55999 return true;
56000 }
56001@@ -91,11 +91,11 @@ try_again:
56002 spin_unlock(&cookie->stores_lock);
56003
56004 if (xpage) {
56005- fscache_stat(&fscache_n_store_vmscan_cancelled);
56006- fscache_stat(&fscache_n_store_radix_deletes);
56007+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
56008+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
56009 ASSERTCMP(xpage, ==, page);
56010 } else {
56011- fscache_stat(&fscache_n_store_vmscan_gone);
56012+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
56013 }
56014
56015 wake_up_bit(&cookie->flags, 0);
56016@@ -110,11 +110,11 @@ page_busy:
56017 * sleeping on memory allocation, so we may need to impose a timeout
56018 * too. */
56019 if (!(gfp & __GFP_WAIT)) {
56020- fscache_stat(&fscache_n_store_vmscan_busy);
56021+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
56022 return false;
56023 }
56024
56025- fscache_stat(&fscache_n_store_vmscan_wait);
56026+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
56027 __fscache_wait_on_page_write(cookie, page);
56028 gfp &= ~__GFP_WAIT;
56029 goto try_again;
56030@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
56031 FSCACHE_COOKIE_STORING_TAG);
56032 if (!radix_tree_tag_get(&cookie->stores, page->index,
56033 FSCACHE_COOKIE_PENDING_TAG)) {
56034- fscache_stat(&fscache_n_store_radix_deletes);
56035+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
56036 xpage = radix_tree_delete(&cookie->stores, page->index);
56037 }
56038 spin_unlock(&cookie->stores_lock);
56039@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
56040
56041 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
56042
56043- fscache_stat(&fscache_n_attr_changed_calls);
56044+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
56045
56046 if (fscache_object_is_active(object)) {
56047 fscache_stat(&fscache_n_cop_attr_changed);
56048@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56049
56050 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56051
56052- fscache_stat(&fscache_n_attr_changed);
56053+ fscache_stat_unchecked(&fscache_n_attr_changed);
56054
56055 op = kzalloc(sizeof(*op), GFP_KERNEL);
56056 if (!op) {
56057- fscache_stat(&fscache_n_attr_changed_nomem);
56058+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
56059 _leave(" = -ENOMEM");
56060 return -ENOMEM;
56061 }
56062@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56063 if (fscache_submit_exclusive_op(object, op) < 0)
56064 goto nobufs;
56065 spin_unlock(&cookie->lock);
56066- fscache_stat(&fscache_n_attr_changed_ok);
56067+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
56068 fscache_put_operation(op);
56069 _leave(" = 0");
56070 return 0;
56071@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56072 nobufs:
56073 spin_unlock(&cookie->lock);
56074 kfree(op);
56075- fscache_stat(&fscache_n_attr_changed_nobufs);
56076+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
56077 _leave(" = %d", -ENOBUFS);
56078 return -ENOBUFS;
56079 }
56080@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
56081 /* allocate a retrieval operation and attempt to submit it */
56082 op = kzalloc(sizeof(*op), GFP_NOIO);
56083 if (!op) {
56084- fscache_stat(&fscache_n_retrievals_nomem);
56085+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56086 return NULL;
56087 }
56088
56089@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
56090 return 0;
56091 }
56092
56093- fscache_stat(&fscache_n_retrievals_wait);
56094+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
56095
56096 jif = jiffies;
56097 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
56098 fscache_wait_bit_interruptible,
56099 TASK_INTERRUPTIBLE) != 0) {
56100- fscache_stat(&fscache_n_retrievals_intr);
56101+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56102 _leave(" = -ERESTARTSYS");
56103 return -ERESTARTSYS;
56104 }
56105@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
56106 */
56107 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56108 struct fscache_retrieval *op,
56109- atomic_t *stat_op_waits,
56110- atomic_t *stat_object_dead)
56111+ atomic_unchecked_t *stat_op_waits,
56112+ atomic_unchecked_t *stat_object_dead)
56113 {
56114 int ret;
56115
56116@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56117 goto check_if_dead;
56118
56119 _debug(">>> WT");
56120- fscache_stat(stat_op_waits);
56121+ fscache_stat_unchecked(stat_op_waits);
56122 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
56123 fscache_wait_bit_interruptible,
56124 TASK_INTERRUPTIBLE) != 0) {
56125@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56126
56127 check_if_dead:
56128 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
56129- fscache_stat(stat_object_dead);
56130+ fscache_stat_unchecked(stat_object_dead);
56131 _leave(" = -ENOBUFS [cancelled]");
56132 return -ENOBUFS;
56133 }
56134 if (unlikely(fscache_object_is_dead(object))) {
56135 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
56136 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
56137- fscache_stat(stat_object_dead);
56138+ fscache_stat_unchecked(stat_object_dead);
56139 return -ENOBUFS;
56140 }
56141 return 0;
56142@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56143
56144 _enter("%p,%p,,,", cookie, page);
56145
56146- fscache_stat(&fscache_n_retrievals);
56147+ fscache_stat_unchecked(&fscache_n_retrievals);
56148
56149 if (hlist_empty(&cookie->backing_objects))
56150 goto nobufs;
56151@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56152 goto nobufs_unlock_dec;
56153 spin_unlock(&cookie->lock);
56154
56155- fscache_stat(&fscache_n_retrieval_ops);
56156+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
56157
56158 /* pin the netfs read context in case we need to do the actual netfs
56159 * read because we've encountered a cache read failure */
56160@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56161
56162 error:
56163 if (ret == -ENOMEM)
56164- fscache_stat(&fscache_n_retrievals_nomem);
56165+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56166 else if (ret == -ERESTARTSYS)
56167- fscache_stat(&fscache_n_retrievals_intr);
56168+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56169 else if (ret == -ENODATA)
56170- fscache_stat(&fscache_n_retrievals_nodata);
56171+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
56172 else if (ret < 0)
56173- fscache_stat(&fscache_n_retrievals_nobufs);
56174+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56175 else
56176- fscache_stat(&fscache_n_retrievals_ok);
56177+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
56178
56179 fscache_put_retrieval(op);
56180 _leave(" = %d", ret);
56181@@ -467,7 +467,7 @@ nobufs_unlock:
56182 spin_unlock(&cookie->lock);
56183 kfree(op);
56184 nobufs:
56185- fscache_stat(&fscache_n_retrievals_nobufs);
56186+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56187 _leave(" = -ENOBUFS");
56188 return -ENOBUFS;
56189 }
56190@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56191
56192 _enter("%p,,%d,,,", cookie, *nr_pages);
56193
56194- fscache_stat(&fscache_n_retrievals);
56195+ fscache_stat_unchecked(&fscache_n_retrievals);
56196
56197 if (hlist_empty(&cookie->backing_objects))
56198 goto nobufs;
56199@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56200 goto nobufs_unlock_dec;
56201 spin_unlock(&cookie->lock);
56202
56203- fscache_stat(&fscache_n_retrieval_ops);
56204+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
56205
56206 /* pin the netfs read context in case we need to do the actual netfs
56207 * read because we've encountered a cache read failure */
56208@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56209
56210 error:
56211 if (ret == -ENOMEM)
56212- fscache_stat(&fscache_n_retrievals_nomem);
56213+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56214 else if (ret == -ERESTARTSYS)
56215- fscache_stat(&fscache_n_retrievals_intr);
56216+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56217 else if (ret == -ENODATA)
56218- fscache_stat(&fscache_n_retrievals_nodata);
56219+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
56220 else if (ret < 0)
56221- fscache_stat(&fscache_n_retrievals_nobufs);
56222+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56223 else
56224- fscache_stat(&fscache_n_retrievals_ok);
56225+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
56226
56227 fscache_put_retrieval(op);
56228 _leave(" = %d", ret);
56229@@ -591,7 +591,7 @@ nobufs_unlock:
56230 spin_unlock(&cookie->lock);
56231 kfree(op);
56232 nobufs:
56233- fscache_stat(&fscache_n_retrievals_nobufs);
56234+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56235 _leave(" = -ENOBUFS");
56236 return -ENOBUFS;
56237 }
56238@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56239
56240 _enter("%p,%p,,,", cookie, page);
56241
56242- fscache_stat(&fscache_n_allocs);
56243+ fscache_stat_unchecked(&fscache_n_allocs);
56244
56245 if (hlist_empty(&cookie->backing_objects))
56246 goto nobufs;
56247@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56248 goto nobufs_unlock;
56249 spin_unlock(&cookie->lock);
56250
56251- fscache_stat(&fscache_n_alloc_ops);
56252+ fscache_stat_unchecked(&fscache_n_alloc_ops);
56253
56254 ret = fscache_wait_for_retrieval_activation(
56255 object, op,
56256@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56257
56258 error:
56259 if (ret == -ERESTARTSYS)
56260- fscache_stat(&fscache_n_allocs_intr);
56261+ fscache_stat_unchecked(&fscache_n_allocs_intr);
56262 else if (ret < 0)
56263- fscache_stat(&fscache_n_allocs_nobufs);
56264+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
56265 else
56266- fscache_stat(&fscache_n_allocs_ok);
56267+ fscache_stat_unchecked(&fscache_n_allocs_ok);
56268
56269 fscache_put_retrieval(op);
56270 _leave(" = %d", ret);
56271@@ -677,7 +677,7 @@ nobufs_unlock:
56272 spin_unlock(&cookie->lock);
56273 kfree(op);
56274 nobufs:
56275- fscache_stat(&fscache_n_allocs_nobufs);
56276+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
56277 _leave(" = -ENOBUFS");
56278 return -ENOBUFS;
56279 }
56280@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56281
56282 spin_lock(&cookie->stores_lock);
56283
56284- fscache_stat(&fscache_n_store_calls);
56285+ fscache_stat_unchecked(&fscache_n_store_calls);
56286
56287 /* find a page to store */
56288 page = NULL;
56289@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56290 page = results[0];
56291 _debug("gang %d [%lx]", n, page->index);
56292 if (page->index > op->store_limit) {
56293- fscache_stat(&fscache_n_store_pages_over_limit);
56294+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
56295 goto superseded;
56296 }
56297
56298@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56299 spin_unlock(&cookie->stores_lock);
56300 spin_unlock(&object->lock);
56301
56302- fscache_stat(&fscache_n_store_pages);
56303+ fscache_stat_unchecked(&fscache_n_store_pages);
56304 fscache_stat(&fscache_n_cop_write_page);
56305 ret = object->cache->ops->write_page(op, page);
56306 fscache_stat_d(&fscache_n_cop_write_page);
56307@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56308 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56309 ASSERT(PageFsCache(page));
56310
56311- fscache_stat(&fscache_n_stores);
56312+ fscache_stat_unchecked(&fscache_n_stores);
56313
56314 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
56315 _leave(" = -ENOBUFS [invalidating]");
56316@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56317 spin_unlock(&cookie->stores_lock);
56318 spin_unlock(&object->lock);
56319
56320- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
56321+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
56322 op->store_limit = object->store_limit;
56323
56324 if (fscache_submit_op(object, &op->op) < 0)
56325@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56326
56327 spin_unlock(&cookie->lock);
56328 radix_tree_preload_end();
56329- fscache_stat(&fscache_n_store_ops);
56330- fscache_stat(&fscache_n_stores_ok);
56331+ fscache_stat_unchecked(&fscache_n_store_ops);
56332+ fscache_stat_unchecked(&fscache_n_stores_ok);
56333
56334 /* the work queue now carries its own ref on the object */
56335 fscache_put_operation(&op->op);
56336@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56337 return 0;
56338
56339 already_queued:
56340- fscache_stat(&fscache_n_stores_again);
56341+ fscache_stat_unchecked(&fscache_n_stores_again);
56342 already_pending:
56343 spin_unlock(&cookie->stores_lock);
56344 spin_unlock(&object->lock);
56345 spin_unlock(&cookie->lock);
56346 radix_tree_preload_end();
56347 kfree(op);
56348- fscache_stat(&fscache_n_stores_ok);
56349+ fscache_stat_unchecked(&fscache_n_stores_ok);
56350 _leave(" = 0");
56351 return 0;
56352
56353@@ -959,14 +959,14 @@ nobufs:
56354 spin_unlock(&cookie->lock);
56355 radix_tree_preload_end();
56356 kfree(op);
56357- fscache_stat(&fscache_n_stores_nobufs);
56358+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
56359 _leave(" = -ENOBUFS");
56360 return -ENOBUFS;
56361
56362 nomem_free:
56363 kfree(op);
56364 nomem:
56365- fscache_stat(&fscache_n_stores_oom);
56366+ fscache_stat_unchecked(&fscache_n_stores_oom);
56367 _leave(" = -ENOMEM");
56368 return -ENOMEM;
56369 }
56370@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
56371 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56372 ASSERTCMP(page, !=, NULL);
56373
56374- fscache_stat(&fscache_n_uncaches);
56375+ fscache_stat_unchecked(&fscache_n_uncaches);
56376
56377 /* cache withdrawal may beat us to it */
56378 if (!PageFsCache(page))
56379@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
56380 struct fscache_cookie *cookie = op->op.object->cookie;
56381
56382 #ifdef CONFIG_FSCACHE_STATS
56383- atomic_inc(&fscache_n_marks);
56384+ atomic_inc_unchecked(&fscache_n_marks);
56385 #endif
56386
56387 _debug("- mark %p{%lx}", page, page->index);
56388diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
56389index 40d13c7..ddf52b9 100644
56390--- a/fs/fscache/stats.c
56391+++ b/fs/fscache/stats.c
56392@@ -18,99 +18,99 @@
56393 /*
56394 * operation counters
56395 */
56396-atomic_t fscache_n_op_pend;
56397-atomic_t fscache_n_op_run;
56398-atomic_t fscache_n_op_enqueue;
56399-atomic_t fscache_n_op_requeue;
56400-atomic_t fscache_n_op_deferred_release;
56401-atomic_t fscache_n_op_release;
56402-atomic_t fscache_n_op_gc;
56403-atomic_t fscache_n_op_cancelled;
56404-atomic_t fscache_n_op_rejected;
56405+atomic_unchecked_t fscache_n_op_pend;
56406+atomic_unchecked_t fscache_n_op_run;
56407+atomic_unchecked_t fscache_n_op_enqueue;
56408+atomic_unchecked_t fscache_n_op_requeue;
56409+atomic_unchecked_t fscache_n_op_deferred_release;
56410+atomic_unchecked_t fscache_n_op_release;
56411+atomic_unchecked_t fscache_n_op_gc;
56412+atomic_unchecked_t fscache_n_op_cancelled;
56413+atomic_unchecked_t fscache_n_op_rejected;
56414
56415-atomic_t fscache_n_attr_changed;
56416-atomic_t fscache_n_attr_changed_ok;
56417-atomic_t fscache_n_attr_changed_nobufs;
56418-atomic_t fscache_n_attr_changed_nomem;
56419-atomic_t fscache_n_attr_changed_calls;
56420+atomic_unchecked_t fscache_n_attr_changed;
56421+atomic_unchecked_t fscache_n_attr_changed_ok;
56422+atomic_unchecked_t fscache_n_attr_changed_nobufs;
56423+atomic_unchecked_t fscache_n_attr_changed_nomem;
56424+atomic_unchecked_t fscache_n_attr_changed_calls;
56425
56426-atomic_t fscache_n_allocs;
56427-atomic_t fscache_n_allocs_ok;
56428-atomic_t fscache_n_allocs_wait;
56429-atomic_t fscache_n_allocs_nobufs;
56430-atomic_t fscache_n_allocs_intr;
56431-atomic_t fscache_n_allocs_object_dead;
56432-atomic_t fscache_n_alloc_ops;
56433-atomic_t fscache_n_alloc_op_waits;
56434+atomic_unchecked_t fscache_n_allocs;
56435+atomic_unchecked_t fscache_n_allocs_ok;
56436+atomic_unchecked_t fscache_n_allocs_wait;
56437+atomic_unchecked_t fscache_n_allocs_nobufs;
56438+atomic_unchecked_t fscache_n_allocs_intr;
56439+atomic_unchecked_t fscache_n_allocs_object_dead;
56440+atomic_unchecked_t fscache_n_alloc_ops;
56441+atomic_unchecked_t fscache_n_alloc_op_waits;
56442
56443-atomic_t fscache_n_retrievals;
56444-atomic_t fscache_n_retrievals_ok;
56445-atomic_t fscache_n_retrievals_wait;
56446-atomic_t fscache_n_retrievals_nodata;
56447-atomic_t fscache_n_retrievals_nobufs;
56448-atomic_t fscache_n_retrievals_intr;
56449-atomic_t fscache_n_retrievals_nomem;
56450-atomic_t fscache_n_retrievals_object_dead;
56451-atomic_t fscache_n_retrieval_ops;
56452-atomic_t fscache_n_retrieval_op_waits;
56453+atomic_unchecked_t fscache_n_retrievals;
56454+atomic_unchecked_t fscache_n_retrievals_ok;
56455+atomic_unchecked_t fscache_n_retrievals_wait;
56456+atomic_unchecked_t fscache_n_retrievals_nodata;
56457+atomic_unchecked_t fscache_n_retrievals_nobufs;
56458+atomic_unchecked_t fscache_n_retrievals_intr;
56459+atomic_unchecked_t fscache_n_retrievals_nomem;
56460+atomic_unchecked_t fscache_n_retrievals_object_dead;
56461+atomic_unchecked_t fscache_n_retrieval_ops;
56462+atomic_unchecked_t fscache_n_retrieval_op_waits;
56463
56464-atomic_t fscache_n_stores;
56465-atomic_t fscache_n_stores_ok;
56466-atomic_t fscache_n_stores_again;
56467-atomic_t fscache_n_stores_nobufs;
56468-atomic_t fscache_n_stores_oom;
56469-atomic_t fscache_n_store_ops;
56470-atomic_t fscache_n_store_calls;
56471-atomic_t fscache_n_store_pages;
56472-atomic_t fscache_n_store_radix_deletes;
56473-atomic_t fscache_n_store_pages_over_limit;
56474+atomic_unchecked_t fscache_n_stores;
56475+atomic_unchecked_t fscache_n_stores_ok;
56476+atomic_unchecked_t fscache_n_stores_again;
56477+atomic_unchecked_t fscache_n_stores_nobufs;
56478+atomic_unchecked_t fscache_n_stores_oom;
56479+atomic_unchecked_t fscache_n_store_ops;
56480+atomic_unchecked_t fscache_n_store_calls;
56481+atomic_unchecked_t fscache_n_store_pages;
56482+atomic_unchecked_t fscache_n_store_radix_deletes;
56483+atomic_unchecked_t fscache_n_store_pages_over_limit;
56484
56485-atomic_t fscache_n_store_vmscan_not_storing;
56486-atomic_t fscache_n_store_vmscan_gone;
56487-atomic_t fscache_n_store_vmscan_busy;
56488-atomic_t fscache_n_store_vmscan_cancelled;
56489-atomic_t fscache_n_store_vmscan_wait;
56490+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
56491+atomic_unchecked_t fscache_n_store_vmscan_gone;
56492+atomic_unchecked_t fscache_n_store_vmscan_busy;
56493+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
56494+atomic_unchecked_t fscache_n_store_vmscan_wait;
56495
56496-atomic_t fscache_n_marks;
56497-atomic_t fscache_n_uncaches;
56498+atomic_unchecked_t fscache_n_marks;
56499+atomic_unchecked_t fscache_n_uncaches;
56500
56501-atomic_t fscache_n_acquires;
56502-atomic_t fscache_n_acquires_null;
56503-atomic_t fscache_n_acquires_no_cache;
56504-atomic_t fscache_n_acquires_ok;
56505-atomic_t fscache_n_acquires_nobufs;
56506-atomic_t fscache_n_acquires_oom;
56507+atomic_unchecked_t fscache_n_acquires;
56508+atomic_unchecked_t fscache_n_acquires_null;
56509+atomic_unchecked_t fscache_n_acquires_no_cache;
56510+atomic_unchecked_t fscache_n_acquires_ok;
56511+atomic_unchecked_t fscache_n_acquires_nobufs;
56512+atomic_unchecked_t fscache_n_acquires_oom;
56513
56514-atomic_t fscache_n_invalidates;
56515-atomic_t fscache_n_invalidates_run;
56516+atomic_unchecked_t fscache_n_invalidates;
56517+atomic_unchecked_t fscache_n_invalidates_run;
56518
56519-atomic_t fscache_n_updates;
56520-atomic_t fscache_n_updates_null;
56521-atomic_t fscache_n_updates_run;
56522+atomic_unchecked_t fscache_n_updates;
56523+atomic_unchecked_t fscache_n_updates_null;
56524+atomic_unchecked_t fscache_n_updates_run;
56525
56526-atomic_t fscache_n_relinquishes;
56527-atomic_t fscache_n_relinquishes_null;
56528-atomic_t fscache_n_relinquishes_waitcrt;
56529-atomic_t fscache_n_relinquishes_retire;
56530+atomic_unchecked_t fscache_n_relinquishes;
56531+atomic_unchecked_t fscache_n_relinquishes_null;
56532+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
56533+atomic_unchecked_t fscache_n_relinquishes_retire;
56534
56535-atomic_t fscache_n_cookie_index;
56536-atomic_t fscache_n_cookie_data;
56537-atomic_t fscache_n_cookie_special;
56538+atomic_unchecked_t fscache_n_cookie_index;
56539+atomic_unchecked_t fscache_n_cookie_data;
56540+atomic_unchecked_t fscache_n_cookie_special;
56541
56542-atomic_t fscache_n_object_alloc;
56543-atomic_t fscache_n_object_no_alloc;
56544-atomic_t fscache_n_object_lookups;
56545-atomic_t fscache_n_object_lookups_negative;
56546-atomic_t fscache_n_object_lookups_positive;
56547-atomic_t fscache_n_object_lookups_timed_out;
56548-atomic_t fscache_n_object_created;
56549-atomic_t fscache_n_object_avail;
56550-atomic_t fscache_n_object_dead;
56551+atomic_unchecked_t fscache_n_object_alloc;
56552+atomic_unchecked_t fscache_n_object_no_alloc;
56553+atomic_unchecked_t fscache_n_object_lookups;
56554+atomic_unchecked_t fscache_n_object_lookups_negative;
56555+atomic_unchecked_t fscache_n_object_lookups_positive;
56556+atomic_unchecked_t fscache_n_object_lookups_timed_out;
56557+atomic_unchecked_t fscache_n_object_created;
56558+atomic_unchecked_t fscache_n_object_avail;
56559+atomic_unchecked_t fscache_n_object_dead;
56560
56561-atomic_t fscache_n_checkaux_none;
56562-atomic_t fscache_n_checkaux_okay;
56563-atomic_t fscache_n_checkaux_update;
56564-atomic_t fscache_n_checkaux_obsolete;
56565+atomic_unchecked_t fscache_n_checkaux_none;
56566+atomic_unchecked_t fscache_n_checkaux_okay;
56567+atomic_unchecked_t fscache_n_checkaux_update;
56568+atomic_unchecked_t fscache_n_checkaux_obsolete;
56569
56570 atomic_t fscache_n_cop_alloc_object;
56571 atomic_t fscache_n_cop_lookup_object;
56572@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
56573 seq_puts(m, "FS-Cache statistics\n");
56574
56575 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
56576- atomic_read(&fscache_n_cookie_index),
56577- atomic_read(&fscache_n_cookie_data),
56578- atomic_read(&fscache_n_cookie_special));
56579+ atomic_read_unchecked(&fscache_n_cookie_index),
56580+ atomic_read_unchecked(&fscache_n_cookie_data),
56581+ atomic_read_unchecked(&fscache_n_cookie_special));
56582
56583 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
56584- atomic_read(&fscache_n_object_alloc),
56585- atomic_read(&fscache_n_object_no_alloc),
56586- atomic_read(&fscache_n_object_avail),
56587- atomic_read(&fscache_n_object_dead));
56588+ atomic_read_unchecked(&fscache_n_object_alloc),
56589+ atomic_read_unchecked(&fscache_n_object_no_alloc),
56590+ atomic_read_unchecked(&fscache_n_object_avail),
56591+ atomic_read_unchecked(&fscache_n_object_dead));
56592 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
56593- atomic_read(&fscache_n_checkaux_none),
56594- atomic_read(&fscache_n_checkaux_okay),
56595- atomic_read(&fscache_n_checkaux_update),
56596- atomic_read(&fscache_n_checkaux_obsolete));
56597+ atomic_read_unchecked(&fscache_n_checkaux_none),
56598+ atomic_read_unchecked(&fscache_n_checkaux_okay),
56599+ atomic_read_unchecked(&fscache_n_checkaux_update),
56600+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
56601
56602 seq_printf(m, "Pages : mrk=%u unc=%u\n",
56603- atomic_read(&fscache_n_marks),
56604- atomic_read(&fscache_n_uncaches));
56605+ atomic_read_unchecked(&fscache_n_marks),
56606+ atomic_read_unchecked(&fscache_n_uncaches));
56607
56608 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
56609 " oom=%u\n",
56610- atomic_read(&fscache_n_acquires),
56611- atomic_read(&fscache_n_acquires_null),
56612- atomic_read(&fscache_n_acquires_no_cache),
56613- atomic_read(&fscache_n_acquires_ok),
56614- atomic_read(&fscache_n_acquires_nobufs),
56615- atomic_read(&fscache_n_acquires_oom));
56616+ atomic_read_unchecked(&fscache_n_acquires),
56617+ atomic_read_unchecked(&fscache_n_acquires_null),
56618+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
56619+ atomic_read_unchecked(&fscache_n_acquires_ok),
56620+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
56621+ atomic_read_unchecked(&fscache_n_acquires_oom));
56622
56623 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
56624- atomic_read(&fscache_n_object_lookups),
56625- atomic_read(&fscache_n_object_lookups_negative),
56626- atomic_read(&fscache_n_object_lookups_positive),
56627- atomic_read(&fscache_n_object_created),
56628- atomic_read(&fscache_n_object_lookups_timed_out));
56629+ atomic_read_unchecked(&fscache_n_object_lookups),
56630+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
56631+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
56632+ atomic_read_unchecked(&fscache_n_object_created),
56633+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
56634
56635 seq_printf(m, "Invals : n=%u run=%u\n",
56636- atomic_read(&fscache_n_invalidates),
56637- atomic_read(&fscache_n_invalidates_run));
56638+ atomic_read_unchecked(&fscache_n_invalidates),
56639+ atomic_read_unchecked(&fscache_n_invalidates_run));
56640
56641 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
56642- atomic_read(&fscache_n_updates),
56643- atomic_read(&fscache_n_updates_null),
56644- atomic_read(&fscache_n_updates_run));
56645+ atomic_read_unchecked(&fscache_n_updates),
56646+ atomic_read_unchecked(&fscache_n_updates_null),
56647+ atomic_read_unchecked(&fscache_n_updates_run));
56648
56649 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
56650- atomic_read(&fscache_n_relinquishes),
56651- atomic_read(&fscache_n_relinquishes_null),
56652- atomic_read(&fscache_n_relinquishes_waitcrt),
56653- atomic_read(&fscache_n_relinquishes_retire));
56654+ atomic_read_unchecked(&fscache_n_relinquishes),
56655+ atomic_read_unchecked(&fscache_n_relinquishes_null),
56656+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
56657+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
56658
56659 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
56660- atomic_read(&fscache_n_attr_changed),
56661- atomic_read(&fscache_n_attr_changed_ok),
56662- atomic_read(&fscache_n_attr_changed_nobufs),
56663- atomic_read(&fscache_n_attr_changed_nomem),
56664- atomic_read(&fscache_n_attr_changed_calls));
56665+ atomic_read_unchecked(&fscache_n_attr_changed),
56666+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
56667+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
56668+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
56669+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
56670
56671 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
56672- atomic_read(&fscache_n_allocs),
56673- atomic_read(&fscache_n_allocs_ok),
56674- atomic_read(&fscache_n_allocs_wait),
56675- atomic_read(&fscache_n_allocs_nobufs),
56676- atomic_read(&fscache_n_allocs_intr));
56677+ atomic_read_unchecked(&fscache_n_allocs),
56678+ atomic_read_unchecked(&fscache_n_allocs_ok),
56679+ atomic_read_unchecked(&fscache_n_allocs_wait),
56680+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
56681+ atomic_read_unchecked(&fscache_n_allocs_intr));
56682 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
56683- atomic_read(&fscache_n_alloc_ops),
56684- atomic_read(&fscache_n_alloc_op_waits),
56685- atomic_read(&fscache_n_allocs_object_dead));
56686+ atomic_read_unchecked(&fscache_n_alloc_ops),
56687+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
56688+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
56689
56690 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
56691 " int=%u oom=%u\n",
56692- atomic_read(&fscache_n_retrievals),
56693- atomic_read(&fscache_n_retrievals_ok),
56694- atomic_read(&fscache_n_retrievals_wait),
56695- atomic_read(&fscache_n_retrievals_nodata),
56696- atomic_read(&fscache_n_retrievals_nobufs),
56697- atomic_read(&fscache_n_retrievals_intr),
56698- atomic_read(&fscache_n_retrievals_nomem));
56699+ atomic_read_unchecked(&fscache_n_retrievals),
56700+ atomic_read_unchecked(&fscache_n_retrievals_ok),
56701+ atomic_read_unchecked(&fscache_n_retrievals_wait),
56702+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
56703+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
56704+ atomic_read_unchecked(&fscache_n_retrievals_intr),
56705+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
56706 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
56707- atomic_read(&fscache_n_retrieval_ops),
56708- atomic_read(&fscache_n_retrieval_op_waits),
56709- atomic_read(&fscache_n_retrievals_object_dead));
56710+ atomic_read_unchecked(&fscache_n_retrieval_ops),
56711+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
56712+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
56713
56714 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
56715- atomic_read(&fscache_n_stores),
56716- atomic_read(&fscache_n_stores_ok),
56717- atomic_read(&fscache_n_stores_again),
56718- atomic_read(&fscache_n_stores_nobufs),
56719- atomic_read(&fscache_n_stores_oom));
56720+ atomic_read_unchecked(&fscache_n_stores),
56721+ atomic_read_unchecked(&fscache_n_stores_ok),
56722+ atomic_read_unchecked(&fscache_n_stores_again),
56723+ atomic_read_unchecked(&fscache_n_stores_nobufs),
56724+ atomic_read_unchecked(&fscache_n_stores_oom));
56725 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
56726- atomic_read(&fscache_n_store_ops),
56727- atomic_read(&fscache_n_store_calls),
56728- atomic_read(&fscache_n_store_pages),
56729- atomic_read(&fscache_n_store_radix_deletes),
56730- atomic_read(&fscache_n_store_pages_over_limit));
56731+ atomic_read_unchecked(&fscache_n_store_ops),
56732+ atomic_read_unchecked(&fscache_n_store_calls),
56733+ atomic_read_unchecked(&fscache_n_store_pages),
56734+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
56735+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
56736
56737 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
56738- atomic_read(&fscache_n_store_vmscan_not_storing),
56739- atomic_read(&fscache_n_store_vmscan_gone),
56740- atomic_read(&fscache_n_store_vmscan_busy),
56741- atomic_read(&fscache_n_store_vmscan_cancelled),
56742- atomic_read(&fscache_n_store_vmscan_wait));
56743+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
56744+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
56745+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
56746+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
56747+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
56748
56749 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
56750- atomic_read(&fscache_n_op_pend),
56751- atomic_read(&fscache_n_op_run),
56752- atomic_read(&fscache_n_op_enqueue),
56753- atomic_read(&fscache_n_op_cancelled),
56754- atomic_read(&fscache_n_op_rejected));
56755+ atomic_read_unchecked(&fscache_n_op_pend),
56756+ atomic_read_unchecked(&fscache_n_op_run),
56757+ atomic_read_unchecked(&fscache_n_op_enqueue),
56758+ atomic_read_unchecked(&fscache_n_op_cancelled),
56759+ atomic_read_unchecked(&fscache_n_op_rejected));
56760 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
56761- atomic_read(&fscache_n_op_deferred_release),
56762- atomic_read(&fscache_n_op_release),
56763- atomic_read(&fscache_n_op_gc));
56764+ atomic_read_unchecked(&fscache_n_op_deferred_release),
56765+ atomic_read_unchecked(&fscache_n_op_release),
56766+ atomic_read_unchecked(&fscache_n_op_gc));
56767
56768 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
56769 atomic_read(&fscache_n_cop_alloc_object),
56770diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
56771index aef34b1..59bfd7b 100644
56772--- a/fs/fuse/cuse.c
56773+++ b/fs/fuse/cuse.c
56774@@ -600,10 +600,12 @@ static int __init cuse_init(void)
56775 INIT_LIST_HEAD(&cuse_conntbl[i]);
56776
56777 /* inherit and extend fuse_dev_operations */
56778- cuse_channel_fops = fuse_dev_operations;
56779- cuse_channel_fops.owner = THIS_MODULE;
56780- cuse_channel_fops.open = cuse_channel_open;
56781- cuse_channel_fops.release = cuse_channel_release;
56782+ pax_open_kernel();
56783+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
56784+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
56785+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
56786+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
56787+ pax_close_kernel();
56788
56789 cuse_class = class_create(THIS_MODULE, "cuse");
56790 if (IS_ERR(cuse_class))
56791diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
56792index 1d55f94..088da65 100644
56793--- a/fs/fuse/dev.c
56794+++ b/fs/fuse/dev.c
56795@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
56796 ret = 0;
56797 pipe_lock(pipe);
56798
56799- if (!pipe->readers) {
56800+ if (!atomic_read(&pipe->readers)) {
56801 send_sig(SIGPIPE, current, 0);
56802 if (!ret)
56803 ret = -EPIPE;
56804@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
56805 page_nr++;
56806 ret += buf->len;
56807
56808- if (pipe->files)
56809+ if (atomic_read(&pipe->files))
56810 do_wakeup = 1;
56811 }
56812
56813diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
56814index 5b12746..b481b03 100644
56815--- a/fs/fuse/dir.c
56816+++ b/fs/fuse/dir.c
56817@@ -1437,7 +1437,7 @@ static char *read_link(struct dentry *dentry)
56818 return link;
56819 }
56820
56821-static void free_link(char *link)
56822+static void free_link(const char *link)
56823 {
56824 if (!IS_ERR(link))
56825 free_page((unsigned long) link);
56826diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
56827index 62b484e..0f9a140 100644
56828--- a/fs/gfs2/inode.c
56829+++ b/fs/gfs2/inode.c
56830@@ -1441,7 +1441,7 @@ out:
56831
56832 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
56833 {
56834- char *s = nd_get_link(nd);
56835+ const char *s = nd_get_link(nd);
56836 if (!IS_ERR(s))
56837 kfree(s);
56838 }
56839diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
56840index a3f868a..bb308ae 100644
56841--- a/fs/hugetlbfs/inode.c
56842+++ b/fs/hugetlbfs/inode.c
56843@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
56844 struct mm_struct *mm = current->mm;
56845 struct vm_area_struct *vma;
56846 struct hstate *h = hstate_file(file);
56847+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
56848 struct vm_unmapped_area_info info;
56849
56850 if (len & ~huge_page_mask(h))
56851@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
56852 return addr;
56853 }
56854
56855+#ifdef CONFIG_PAX_RANDMMAP
56856+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
56857+#endif
56858+
56859 if (addr) {
56860 addr = ALIGN(addr, huge_page_size(h));
56861 vma = find_vma(mm, addr);
56862- if (TASK_SIZE - len >= addr &&
56863- (!vma || addr + len <= vma->vm_start))
56864+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
56865 return addr;
56866 }
56867
56868 info.flags = 0;
56869 info.length = len;
56870 info.low_limit = TASK_UNMAPPED_BASE;
56871+
56872+#ifdef CONFIG_PAX_RANDMMAP
56873+ if (mm->pax_flags & MF_PAX_RANDMMAP)
56874+ info.low_limit += mm->delta_mmap;
56875+#endif
56876+
56877 info.high_limit = TASK_SIZE;
56878 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
56879 info.align_offset = 0;
56880@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
56881 };
56882 MODULE_ALIAS_FS("hugetlbfs");
56883
56884-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
56885+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
56886
56887 static int can_do_hugetlb_shm(void)
56888 {
56889diff --git a/fs/inode.c b/fs/inode.c
56890index 00d5fc3..98ce7d7 100644
56891--- a/fs/inode.c
56892+++ b/fs/inode.c
56893@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
56894
56895 #ifdef CONFIG_SMP
56896 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
56897- static atomic_t shared_last_ino;
56898- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
56899+ static atomic_unchecked_t shared_last_ino;
56900+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
56901
56902 res = next - LAST_INO_BATCH;
56903 }
56904diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
56905index 4a6cf28..d3a29d3 100644
56906--- a/fs/jffs2/erase.c
56907+++ b/fs/jffs2/erase.c
56908@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
56909 struct jffs2_unknown_node marker = {
56910 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
56911 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
56912- .totlen = cpu_to_je32(c->cleanmarker_size)
56913+ .totlen = cpu_to_je32(c->cleanmarker_size),
56914+ .hdr_crc = cpu_to_je32(0)
56915 };
56916
56917 jffs2_prealloc_raw_node_refs(c, jeb, 1);
56918diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
56919index a6597d6..41b30ec 100644
56920--- a/fs/jffs2/wbuf.c
56921+++ b/fs/jffs2/wbuf.c
56922@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
56923 {
56924 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
56925 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
56926- .totlen = constant_cpu_to_je32(8)
56927+ .totlen = constant_cpu_to_je32(8),
56928+ .hdr_crc = constant_cpu_to_je32(0)
56929 };
56930
56931 /*
56932diff --git a/fs/jfs/super.c b/fs/jfs/super.c
56933index 788e0a9..8433098 100644
56934--- a/fs/jfs/super.c
56935+++ b/fs/jfs/super.c
56936@@ -878,7 +878,7 @@ static int __init init_jfs_fs(void)
56937
56938 jfs_inode_cachep =
56939 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
56940- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
56941+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
56942 init_once);
56943 if (jfs_inode_cachep == NULL)
56944 return -ENOMEM;
56945diff --git a/fs/libfs.c b/fs/libfs.c
56946index 916da8c..1588998 100644
56947--- a/fs/libfs.c
56948+++ b/fs/libfs.c
56949@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
56950
56951 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
56952 struct dentry *next;
56953+ char d_name[sizeof(next->d_iname)];
56954+ const unsigned char *name;
56955+
56956 next = list_entry(p, struct dentry, d_u.d_child);
56957 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
56958 if (!simple_positive(next)) {
56959@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
56960
56961 spin_unlock(&next->d_lock);
56962 spin_unlock(&dentry->d_lock);
56963- if (filldir(dirent, next->d_name.name,
56964+ name = next->d_name.name;
56965+ if (name == next->d_iname) {
56966+ memcpy(d_name, name, next->d_name.len);
56967+ name = d_name;
56968+ }
56969+ if (filldir(dirent, name,
56970 next->d_name.len, filp->f_pos,
56971 next->d_inode->i_ino,
56972 dt_type(next->d_inode)) < 0)
56973diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
56974index acd3947..1f896e2 100644
56975--- a/fs/lockd/clntproc.c
56976+++ b/fs/lockd/clntproc.c
56977@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
56978 /*
56979 * Cookie counter for NLM requests
56980 */
56981-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
56982+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
56983
56984 void nlmclnt_next_cookie(struct nlm_cookie *c)
56985 {
56986- u32 cookie = atomic_inc_return(&nlm_cookie);
56987+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
56988
56989 memcpy(c->data, &cookie, 4);
56990 c->len=4;
56991diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
56992index a2aa97d..10d6c41 100644
56993--- a/fs/lockd/svc.c
56994+++ b/fs/lockd/svc.c
56995@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
56996 svc_sock_update_bufs(serv);
56997 serv->sv_maxconn = nlm_max_connections;
56998
56999- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
57000+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
57001 if (IS_ERR(nlmsvc_task)) {
57002 error = PTR_ERR(nlmsvc_task);
57003 printk(KERN_WARNING
57004diff --git a/fs/locks.c b/fs/locks.c
57005index cb424a4..850e4dd 100644
57006--- a/fs/locks.c
57007+++ b/fs/locks.c
57008@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
57009 return;
57010
57011 if (filp->f_op && filp->f_op->flock) {
57012- struct file_lock fl = {
57013+ struct file_lock flock = {
57014 .fl_pid = current->tgid,
57015 .fl_file = filp,
57016 .fl_flags = FL_FLOCK,
57017 .fl_type = F_UNLCK,
57018 .fl_end = OFFSET_MAX,
57019 };
57020- filp->f_op->flock(filp, F_SETLKW, &fl);
57021- if (fl.fl_ops && fl.fl_ops->fl_release_private)
57022- fl.fl_ops->fl_release_private(&fl);
57023+ filp->f_op->flock(filp, F_SETLKW, &flock);
57024+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
57025+ flock.fl_ops->fl_release_private(&flock);
57026 }
57027
57028 lock_flocks();
57029diff --git a/fs/namei.c b/fs/namei.c
57030index 9ed9361..2b72db1 100644
57031--- a/fs/namei.c
57032+++ b/fs/namei.c
57033@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
57034 if (ret != -EACCES)
57035 return ret;
57036
57037+#ifdef CONFIG_GRKERNSEC
57038+ /* we'll block if we have to log due to a denied capability use */
57039+ if (mask & MAY_NOT_BLOCK)
57040+ return -ECHILD;
57041+#endif
57042+
57043 if (S_ISDIR(inode->i_mode)) {
57044 /* DACs are overridable for directories */
57045- if (inode_capable(inode, CAP_DAC_OVERRIDE))
57046- return 0;
57047 if (!(mask & MAY_WRITE))
57048- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
57049+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
57050+ inode_capable(inode, CAP_DAC_READ_SEARCH))
57051 return 0;
57052+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
57053+ return 0;
57054 return -EACCES;
57055 }
57056 /*
57057+ * Searching includes executable on directories, else just read.
57058+ */
57059+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
57060+ if (mask == MAY_READ)
57061+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
57062+ inode_capable(inode, CAP_DAC_READ_SEARCH))
57063+ return 0;
57064+
57065+ /*
57066 * Read/write DACs are always overridable.
57067 * Executable DACs are overridable when there is
57068 * at least one exec bit set.
57069@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
57070 if (inode_capable(inode, CAP_DAC_OVERRIDE))
57071 return 0;
57072
57073- /*
57074- * Searching includes executable on directories, else just read.
57075- */
57076- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
57077- if (mask == MAY_READ)
57078- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
57079- return 0;
57080-
57081 return -EACCES;
57082 }
57083
57084@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
57085 {
57086 struct dentry *dentry = link->dentry;
57087 int error;
57088- char *s;
57089+ const char *s;
57090
57091 BUG_ON(nd->flags & LOOKUP_RCU);
57092
57093@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
57094 if (error)
57095 goto out_put_nd_path;
57096
57097+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
57098+ dentry->d_inode, dentry, nd->path.mnt)) {
57099+ error = -EACCES;
57100+ goto out_put_nd_path;
57101+ }
57102+
57103 nd->last_type = LAST_BIND;
57104 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
57105 error = PTR_ERR(*p);
57106@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
57107 if (res)
57108 break;
57109 res = walk_component(nd, path, LOOKUP_FOLLOW);
57110+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
57111+ res = -EACCES;
57112 put_link(nd, &link, cookie);
57113 } while (res > 0);
57114
57115@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
57116 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
57117 {
57118 unsigned long a, b, adata, bdata, mask, hash, len;
57119- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
57120+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
57121
57122 hash = a = 0;
57123 len = -sizeof(unsigned long);
57124@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
57125 if (err)
57126 break;
57127 err = lookup_last(nd, &path);
57128+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
57129+ err = -EACCES;
57130 put_link(nd, &link, cookie);
57131 }
57132 }
57133@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
57134 if (!err)
57135 err = complete_walk(nd);
57136
57137+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
57138+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57139+ path_put(&nd->path);
57140+ err = -ENOENT;
57141+ }
57142+ }
57143+
57144 if (!err && nd->flags & LOOKUP_DIRECTORY) {
57145 if (!can_lookup(nd->inode)) {
57146 path_put(&nd->path);
57147@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
57148 retval = path_lookupat(dfd, name->name,
57149 flags | LOOKUP_REVAL, nd);
57150
57151- if (likely(!retval))
57152+ if (likely(!retval)) {
57153 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
57154+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
57155+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
57156+ path_put(&nd->path);
57157+ return -ENOENT;
57158+ }
57159+ }
57160+ }
57161 return retval;
57162 }
57163
57164@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
57165 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
57166 return -EPERM;
57167
57168+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
57169+ return -EPERM;
57170+ if (gr_handle_rawio(inode))
57171+ return -EPERM;
57172+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
57173+ return -EACCES;
57174+
57175 return 0;
57176 }
57177
57178@@ -2602,7 +2641,7 @@ looked_up:
57179 * cleared otherwise prior to returning.
57180 */
57181 static int lookup_open(struct nameidata *nd, struct path *path,
57182- struct file *file,
57183+ struct path *link, struct file *file,
57184 const struct open_flags *op,
57185 bool got_write, int *opened)
57186 {
57187@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
57188 /* Negative dentry, just create the file */
57189 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
57190 umode_t mode = op->mode;
57191+
57192+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
57193+ error = -EACCES;
57194+ goto out_dput;
57195+ }
57196+
57197+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
57198+ error = -EACCES;
57199+ goto out_dput;
57200+ }
57201+
57202 if (!IS_POSIXACL(dir->d_inode))
57203 mode &= ~current_umask();
57204 /*
57205@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
57206 nd->flags & LOOKUP_EXCL);
57207 if (error)
57208 goto out_dput;
57209+ else
57210+ gr_handle_create(dentry, nd->path.mnt);
57211 }
57212 out_no_open:
57213 path->dentry = dentry;
57214@@ -2672,7 +2724,7 @@ out_dput:
57215 /*
57216 * Handle the last step of open()
57217 */
57218-static int do_last(struct nameidata *nd, struct path *path,
57219+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
57220 struct file *file, const struct open_flags *op,
57221 int *opened, struct filename *name)
57222 {
57223@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
57224 error = complete_walk(nd);
57225 if (error)
57226 return error;
57227+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57228+ error = -ENOENT;
57229+ goto out;
57230+ }
57231 audit_inode(name, nd->path.dentry, 0);
57232 if (open_flag & O_CREAT) {
57233 error = -EISDIR;
57234 goto out;
57235 }
57236+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57237+ error = -EACCES;
57238+ goto out;
57239+ }
57240 goto finish_open;
57241 case LAST_BIND:
57242 error = complete_walk(nd);
57243 if (error)
57244 return error;
57245+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
57246+ error = -ENOENT;
57247+ goto out;
57248+ }
57249+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57250+ error = -EACCES;
57251+ goto out;
57252+ }
57253 audit_inode(name, dir, 0);
57254 goto finish_open;
57255 }
57256@@ -2759,7 +2827,7 @@ retry_lookup:
57257 */
57258 }
57259 mutex_lock(&dir->d_inode->i_mutex);
57260- error = lookup_open(nd, path, file, op, got_write, opened);
57261+ error = lookup_open(nd, path, link, file, op, got_write, opened);
57262 mutex_unlock(&dir->d_inode->i_mutex);
57263
57264 if (error <= 0) {
57265@@ -2783,11 +2851,28 @@ retry_lookup:
57266 goto finish_open_created;
57267 }
57268
57269+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
57270+ error = -ENOENT;
57271+ goto exit_dput;
57272+ }
57273+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
57274+ error = -EACCES;
57275+ goto exit_dput;
57276+ }
57277+
57278 /*
57279 * create/update audit record if it already exists.
57280 */
57281- if (path->dentry->d_inode)
57282+ if (path->dentry->d_inode) {
57283+ /* only check if O_CREAT is specified, all other checks need to go
57284+ into may_open */
57285+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
57286+ error = -EACCES;
57287+ goto exit_dput;
57288+ }
57289+
57290 audit_inode(name, path->dentry, 0);
57291+ }
57292
57293 /*
57294 * If atomic_open() acquired write access it is dropped now due to
57295@@ -2828,6 +2913,11 @@ finish_lookup:
57296 }
57297 }
57298 BUG_ON(inode != path->dentry->d_inode);
57299+ /* if we're resolving a symlink to another symlink */
57300+ if (link && gr_handle_symlink_owner(link, inode)) {
57301+ error = -EACCES;
57302+ goto out;
57303+ }
57304 return 1;
57305 }
57306
57307@@ -2837,7 +2927,6 @@ finish_lookup:
57308 save_parent.dentry = nd->path.dentry;
57309 save_parent.mnt = mntget(path->mnt);
57310 nd->path.dentry = path->dentry;
57311-
57312 }
57313 nd->inode = inode;
57314 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
57315@@ -2846,6 +2935,16 @@ finish_lookup:
57316 path_put(&save_parent);
57317 return error;
57318 }
57319+
57320+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57321+ error = -ENOENT;
57322+ goto out;
57323+ }
57324+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57325+ error = -EACCES;
57326+ goto out;
57327+ }
57328+
57329 error = -EISDIR;
57330 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
57331 goto out;
57332@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
57333 if (unlikely(error))
57334 goto out;
57335
57336- error = do_last(nd, &path, file, op, &opened, pathname);
57337+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
57338 while (unlikely(error > 0)) { /* trailing symlink */
57339 struct path link = path;
57340 void *cookie;
57341@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
57342 error = follow_link(&link, nd, &cookie);
57343 if (unlikely(error))
57344 break;
57345- error = do_last(nd, &path, file, op, &opened, pathname);
57346+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
57347 put_link(nd, &link, cookie);
57348 }
57349 out:
57350@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
57351 goto unlock;
57352
57353 error = -EEXIST;
57354- if (dentry->d_inode)
57355+ if (dentry->d_inode) {
57356+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
57357+ error = -ENOENT;
57358+ }
57359 goto fail;
57360+ }
57361 /*
57362 * Special case - lookup gave negative, but... we had foo/bar/
57363 * From the vfs_mknod() POV we just have a negative dentry -
57364@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
57365 }
57366 EXPORT_SYMBOL(user_path_create);
57367
57368+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
57369+{
57370+ struct filename *tmp = getname(pathname);
57371+ struct dentry *res;
57372+ if (IS_ERR(tmp))
57373+ return ERR_CAST(tmp);
57374+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
57375+ if (IS_ERR(res))
57376+ putname(tmp);
57377+ else
57378+ *to = tmp;
57379+ return res;
57380+}
57381+
57382 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
57383 {
57384 int error = may_create(dir, dentry);
57385@@ -3177,6 +3294,17 @@ retry:
57386
57387 if (!IS_POSIXACL(path.dentry->d_inode))
57388 mode &= ~current_umask();
57389+
57390+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
57391+ error = -EPERM;
57392+ goto out;
57393+ }
57394+
57395+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
57396+ error = -EACCES;
57397+ goto out;
57398+ }
57399+
57400 error = security_path_mknod(&path, dentry, mode, dev);
57401 if (error)
57402 goto out;
57403@@ -3193,6 +3321,8 @@ retry:
57404 break;
57405 }
57406 out:
57407+ if (!error)
57408+ gr_handle_create(dentry, path.mnt);
57409 done_path_create(&path, dentry);
57410 if (retry_estale(error, lookup_flags)) {
57411 lookup_flags |= LOOKUP_REVAL;
57412@@ -3245,9 +3375,16 @@ retry:
57413
57414 if (!IS_POSIXACL(path.dentry->d_inode))
57415 mode &= ~current_umask();
57416+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
57417+ error = -EACCES;
57418+ goto out;
57419+ }
57420 error = security_path_mkdir(&path, dentry, mode);
57421 if (!error)
57422 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
57423+ if (!error)
57424+ gr_handle_create(dentry, path.mnt);
57425+out:
57426 done_path_create(&path, dentry);
57427 if (retry_estale(error, lookup_flags)) {
57428 lookup_flags |= LOOKUP_REVAL;
57429@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
57430 struct filename *name;
57431 struct dentry *dentry;
57432 struct nameidata nd;
57433+ ino_t saved_ino = 0;
57434+ dev_t saved_dev = 0;
57435 unsigned int lookup_flags = 0;
57436 retry:
57437 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
57438@@ -3360,10 +3499,21 @@ retry:
57439 error = -ENOENT;
57440 goto exit3;
57441 }
57442+
57443+ saved_ino = dentry->d_inode->i_ino;
57444+ saved_dev = gr_get_dev_from_dentry(dentry);
57445+
57446+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
57447+ error = -EACCES;
57448+ goto exit3;
57449+ }
57450+
57451 error = security_path_rmdir(&nd.path, dentry);
57452 if (error)
57453 goto exit3;
57454 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
57455+ if (!error && (saved_dev || saved_ino))
57456+ gr_handle_delete(saved_ino, saved_dev);
57457 exit3:
57458 dput(dentry);
57459 exit2:
57460@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
57461 struct dentry *dentry;
57462 struct nameidata nd;
57463 struct inode *inode = NULL;
57464+ ino_t saved_ino = 0;
57465+ dev_t saved_dev = 0;
57466 unsigned int lookup_flags = 0;
57467 retry:
57468 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
57469@@ -3455,10 +3607,22 @@ retry:
57470 if (!inode)
57471 goto slashes;
57472 ihold(inode);
57473+
57474+ if (inode->i_nlink <= 1) {
57475+ saved_ino = inode->i_ino;
57476+ saved_dev = gr_get_dev_from_dentry(dentry);
57477+ }
57478+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
57479+ error = -EACCES;
57480+ goto exit2;
57481+ }
57482+
57483 error = security_path_unlink(&nd.path, dentry);
57484 if (error)
57485 goto exit2;
57486 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
57487+ if (!error && (saved_ino || saved_dev))
57488+ gr_handle_delete(saved_ino, saved_dev);
57489 exit2:
57490 dput(dentry);
57491 }
57492@@ -3536,9 +3700,17 @@ retry:
57493 if (IS_ERR(dentry))
57494 goto out_putname;
57495
57496+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
57497+ error = -EACCES;
57498+ goto out;
57499+ }
57500+
57501 error = security_path_symlink(&path, dentry, from->name);
57502 if (!error)
57503 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
57504+ if (!error)
57505+ gr_handle_create(dentry, path.mnt);
57506+out:
57507 done_path_create(&path, dentry);
57508 if (retry_estale(error, lookup_flags)) {
57509 lookup_flags |= LOOKUP_REVAL;
57510@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
57511 {
57512 struct dentry *new_dentry;
57513 struct path old_path, new_path;
57514+ struct filename *to = NULL;
57515 int how = 0;
57516 int error;
57517
57518@@ -3635,7 +3808,7 @@ retry:
57519 if (error)
57520 return error;
57521
57522- new_dentry = user_path_create(newdfd, newname, &new_path,
57523+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
57524 (how & LOOKUP_REVAL));
57525 error = PTR_ERR(new_dentry);
57526 if (IS_ERR(new_dentry))
57527@@ -3647,11 +3820,28 @@ retry:
57528 error = may_linkat(&old_path);
57529 if (unlikely(error))
57530 goto out_dput;
57531+
57532+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
57533+ old_path.dentry->d_inode,
57534+ old_path.dentry->d_inode->i_mode, to)) {
57535+ error = -EACCES;
57536+ goto out_dput;
57537+ }
57538+
57539+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
57540+ old_path.dentry, old_path.mnt, to)) {
57541+ error = -EACCES;
57542+ goto out_dput;
57543+ }
57544+
57545 error = security_path_link(old_path.dentry, &new_path, new_dentry);
57546 if (error)
57547 goto out_dput;
57548 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
57549+ if (!error)
57550+ gr_handle_create(new_dentry, new_path.mnt);
57551 out_dput:
57552+ putname(to);
57553 done_path_create(&new_path, new_dentry);
57554 if (retry_estale(error, how)) {
57555 how |= LOOKUP_REVAL;
57556@@ -3897,12 +4087,21 @@ retry:
57557 if (new_dentry == trap)
57558 goto exit5;
57559
57560+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
57561+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
57562+ to);
57563+ if (error)
57564+ goto exit5;
57565+
57566 error = security_path_rename(&oldnd.path, old_dentry,
57567 &newnd.path, new_dentry);
57568 if (error)
57569 goto exit5;
57570 error = vfs_rename(old_dir->d_inode, old_dentry,
57571 new_dir->d_inode, new_dentry);
57572+ if (!error)
57573+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
57574+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
57575 exit5:
57576 dput(new_dentry);
57577 exit4:
57578@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
57579
57580 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
57581 {
57582+ char tmpbuf[64];
57583+ const char *newlink;
57584 int len;
57585
57586 len = PTR_ERR(link);
57587@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
57588 len = strlen(link);
57589 if (len > (unsigned) buflen)
57590 len = buflen;
57591- if (copy_to_user(buffer, link, len))
57592+
57593+ if (len < sizeof(tmpbuf)) {
57594+ memcpy(tmpbuf, link, len);
57595+ newlink = tmpbuf;
57596+ } else
57597+ newlink = link;
57598+
57599+ if (copy_to_user(buffer, newlink, len))
57600 len = -EFAULT;
57601 out:
57602 return len;
57603diff --git a/fs/namespace.c b/fs/namespace.c
e2b79cd1 57604index a45ba4f..e7dc489 100644
bb5f0bf8
AF
57605--- a/fs/namespace.c
57606+++ b/fs/namespace.c
57607@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
57608 if (!(sb->s_flags & MS_RDONLY))
57609 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
57610 up_write(&sb->s_umount);
57611+
57612+ gr_log_remount(mnt->mnt_devname, retval);
57613+
57614 return retval;
57615 }
57616
57617@@ -1283,6 +1286,9 @@ static int do_umount(struct mount *mnt, int flags)
57618 }
57619 br_write_unlock(&vfsmount_lock);
57620 namespace_unlock();
57621+
57622+ gr_log_unmount(mnt->mnt_devname, retval);
57623+
57624 return retval;
57625 }
57626
57627@@ -1302,7 +1308,7 @@ static inline bool may_mount(void)
57628 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
57629 */
57630
57631-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
57632+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
57633 {
57634 struct path path;
57635 struct mount *mnt;
57636@@ -1342,7 +1348,7 @@ out:
57637 /*
57638 * The 2.0 compatible umount. No flags.
57639 */
57640-SYSCALL_DEFINE1(oldumount, char __user *, name)
57641+SYSCALL_DEFINE1(oldumount, const char __user *, name)
57642 {
57643 return sys_umount(name, 0);
57644 }
57645@@ -2313,6 +2319,16 @@ long do_mount(const char *dev_name, const char *dir_name,
57646 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
57647 MS_STRICTATIME);
57648
57649+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
57650+ retval = -EPERM;
57651+ goto dput_out;
57652+ }
57653+
57654+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
57655+ retval = -EPERM;
57656+ goto dput_out;
57657+ }
57658+
57659 if (flags & MS_REMOUNT)
57660 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
57661 data_page);
57662@@ -2327,6 +2343,9 @@ long do_mount(const char *dev_name, const char *dir_name,
57663 dev_name, data_page);
57664 dput_out:
57665 path_put(&path);
57666+
57667+ gr_log_mount(dev_name, dir_name, retval);
57668+
57669 return retval;
57670 }
57671
57672@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
57673 }
57674 EXPORT_SYMBOL(mount_subtree);
57675
57676-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
57677- char __user *, type, unsigned long, flags, void __user *, data)
57678+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
57679+ const char __user *, type, unsigned long, flags, void __user *, data)
57680 {
57681 int ret;
57682 char *kernel_type;
57683@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
57684 if (error)
57685 goto out2;
57686
57687+ if (gr_handle_chroot_pivot()) {
57688+ error = -EPERM;
57689+ goto out2;
57690+ }
57691+
57692 get_fs_root(current->fs, &root);
57693 old_mp = lock_mount(&old);
57694 error = PTR_ERR(old_mp);
57695@@ -2864,7 +2888,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
57696 !nsown_capable(CAP_SYS_ADMIN))
57697 return -EPERM;
57698
57699- if (fs->users != 1)
57700+ if (atomic_read(&fs->users) != 1)
57701 return -EINVAL;
57702
57703 get_mnt_ns(mnt_ns);
57704diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
57705index cff089a..4c3d57a 100644
57706--- a/fs/nfs/callback.c
57707+++ b/fs/nfs/callback.c
57708@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
57709 struct svc_rqst *rqstp;
57710 int (*callback_svc)(void *vrqstp);
57711 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
57712- char svc_name[12];
57713 int ret;
57714
57715 nfs_callback_bc_serv(minorversion, xprt, serv);
57716@@ -235,10 +234,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
57717
57718 svc_sock_update_bufs(serv);
57719
57720- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
57721 cb_info->serv = serv;
57722 cb_info->rqst = rqstp;
57723- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
57724+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
57725 if (IS_ERR(cb_info->task)) {
57726 ret = PTR_ERR(cb_info->task);
57727 svc_exit_thread(cb_info->rqst);
57728diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
57729index a35582c..ebbdcd5 100644
57730--- a/fs/nfs/callback_xdr.c
57731+++ b/fs/nfs/callback_xdr.c
57732@@ -51,7 +51,7 @@ struct callback_op {
57733 callback_decode_arg_t decode_args;
57734 callback_encode_res_t encode_res;
57735 long res_maxsize;
57736-};
57737+} __do_const;
57738
57739 static struct callback_op callback_ops[];
57740
57741diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
57742index c1c7a9d..7afa0b8 100644
57743--- a/fs/nfs/inode.c
57744+++ b/fs/nfs/inode.c
57745@@ -1043,16 +1043,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
57746 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
57747 }
57748
57749-static atomic_long_t nfs_attr_generation_counter;
57750+static atomic_long_unchecked_t nfs_attr_generation_counter;
57751
57752 static unsigned long nfs_read_attr_generation_counter(void)
57753 {
57754- return atomic_long_read(&nfs_attr_generation_counter);
57755+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
57756 }
57757
57758 unsigned long nfs_inc_attr_generation_counter(void)
57759 {
57760- return atomic_long_inc_return(&nfs_attr_generation_counter);
57761+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
57762 }
57763
57764 void nfs_fattr_init(struct nfs_fattr *fattr)
57765diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
57766index 2c37442..9b9538b 100644
57767--- a/fs/nfs/nfs4state.c
57768+++ b/fs/nfs/nfs4state.c
57769@@ -1193,7 +1193,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
57770 snprintf(buf, sizeof(buf), "%s-manager",
57771 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
57772 rcu_read_unlock();
57773- task = kthread_run(nfs4_run_state_manager, clp, buf);
57774+ task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
57775 if (IS_ERR(task)) {
57776 printk(KERN_ERR "%s: kthread_run: %ld\n",
57777 __func__, PTR_ERR(task));
57778diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
57779index 27d74a2..c4c2a73 100644
57780--- a/fs/nfsd/nfs4proc.c
57781+++ b/fs/nfsd/nfs4proc.c
57782@@ -1126,7 +1126,7 @@ struct nfsd4_operation {
57783 nfsd4op_rsize op_rsize_bop;
57784 stateid_getter op_get_currentstateid;
57785 stateid_setter op_set_currentstateid;
57786-};
57787+} __do_const;
57788
57789 static struct nfsd4_operation nfsd4_ops[];
57790
57791diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
57792index 582321a..0224663 100644
57793--- a/fs/nfsd/nfs4xdr.c
57794+++ b/fs/nfsd/nfs4xdr.c
57795@@ -1458,7 +1458,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
57796
57797 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
57798
57799-static nfsd4_dec nfsd4_dec_ops[] = {
57800+static const nfsd4_dec nfsd4_dec_ops[] = {
57801 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
57802 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
57803 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
57804@@ -1498,7 +1498,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
57805 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
57806 };
57807
57808-static nfsd4_dec nfsd41_dec_ops[] = {
57809+static const nfsd4_dec nfsd41_dec_ops[] = {
57810 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
57811 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
57812 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
57813@@ -1560,7 +1560,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
57814 };
57815
57816 struct nfsd4_minorversion_ops {
57817- nfsd4_dec *decoders;
57818+ const nfsd4_dec *decoders;
57819 int nops;
57820 };
57821
57822diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
57823index e76244e..9fe8f2f1 100644
57824--- a/fs/nfsd/nfscache.c
57825+++ b/fs/nfsd/nfscache.c
57826@@ -526,14 +526,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
57827 {
57828 struct svc_cacherep *rp = rqstp->rq_cacherep;
57829 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
57830- int len;
57831+ long len;
57832 size_t bufsize = 0;
57833
57834 if (!rp)
57835 return;
57836
57837- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
57838- len >>= 2;
57839+ if (statp) {
57840+ len = (char*)statp - (char*)resv->iov_base;
57841+ len = resv->iov_len - len;
57842+ len >>= 2;
57843+ }
57844
57845 /* Don't cache excessive amounts of data and XDR failures */
57846 if (!statp || len > (256 >> 2)) {
57847diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
57848index baf149a..76b86ad 100644
57849--- a/fs/nfsd/vfs.c
57850+++ b/fs/nfsd/vfs.c
57851@@ -940,7 +940,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
57852 } else {
57853 oldfs = get_fs();
57854 set_fs(KERNEL_DS);
57855- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
57856+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
57857 set_fs(oldfs);
57858 }
57859
57860@@ -1027,7 +1027,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
57861
57862 /* Write the data. */
57863 oldfs = get_fs(); set_fs(KERNEL_DS);
57864- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
57865+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
57866 set_fs(oldfs);
57867 if (host_err < 0)
57868 goto out_nfserr;
57869@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
57870 */
57871
57872 oldfs = get_fs(); set_fs(KERNEL_DS);
57873- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
57874+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
57875 set_fs(oldfs);
57876
57877 if (host_err < 0)
57878diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
57879index fea6bd5..8ee9d81 100644
57880--- a/fs/nls/nls_base.c
57881+++ b/fs/nls/nls_base.c
57882@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
57883
57884 int register_nls(struct nls_table * nls)
57885 {
57886- struct nls_table ** tmp = &tables;
57887+ struct nls_table *tmp = tables;
57888
57889 if (nls->next)
57890 return -EBUSY;
57891
57892 spin_lock(&nls_lock);
57893- while (*tmp) {
57894- if (nls == *tmp) {
57895+ while (tmp) {
57896+ if (nls == tmp) {
57897 spin_unlock(&nls_lock);
57898 return -EBUSY;
57899 }
57900- tmp = &(*tmp)->next;
57901+ tmp = tmp->next;
57902 }
57903- nls->next = tables;
57904+ pax_open_kernel();
57905+ *(struct nls_table **)&nls->next = tables;
57906+ pax_close_kernel();
57907 tables = nls;
57908 spin_unlock(&nls_lock);
57909 return 0;
57910@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
57911
57912 int unregister_nls(struct nls_table * nls)
57913 {
57914- struct nls_table ** tmp = &tables;
57915+ struct nls_table * const * tmp = &tables;
57916
57917 spin_lock(&nls_lock);
57918 while (*tmp) {
57919 if (nls == *tmp) {
57920- *tmp = nls->next;
57921+ pax_open_kernel();
57922+ *(struct nls_table **)tmp = nls->next;
57923+ pax_close_kernel();
57924 spin_unlock(&nls_lock);
57925 return 0;
57926 }
57927diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
57928index 7424929..35f6be5 100644
57929--- a/fs/nls/nls_euc-jp.c
57930+++ b/fs/nls/nls_euc-jp.c
57931@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
57932 p_nls = load_nls("cp932");
57933
57934 if (p_nls) {
57935- table.charset2upper = p_nls->charset2upper;
57936- table.charset2lower = p_nls->charset2lower;
57937+ pax_open_kernel();
57938+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
57939+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
57940+ pax_close_kernel();
57941 return register_nls(&table);
57942 }
57943
57944diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
57945index e7bc1d7..06bd4bb 100644
57946--- a/fs/nls/nls_koi8-ru.c
57947+++ b/fs/nls/nls_koi8-ru.c
57948@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
57949 p_nls = load_nls("koi8-u");
57950
57951 if (p_nls) {
57952- table.charset2upper = p_nls->charset2upper;
57953- table.charset2lower = p_nls->charset2lower;
57954+ pax_open_kernel();
57955+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
57956+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
57957+ pax_close_kernel();
57958 return register_nls(&table);
57959 }
57960
57961diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
57962index 77cc85d..a1e6299 100644
57963--- a/fs/notify/fanotify/fanotify_user.c
57964+++ b/fs/notify/fanotify/fanotify_user.c
57965@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
57966
57967 fd = fanotify_event_metadata.fd;
57968 ret = -EFAULT;
57969- if (copy_to_user(buf, &fanotify_event_metadata,
57970- fanotify_event_metadata.event_len))
57971+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
57972+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
57973 goto out_close_fd;
57974
57975 ret = prepare_for_access_response(group, event, fd);
57976diff --git a/fs/notify/notification.c b/fs/notify/notification.c
57977index 7b51b05..5ea5ef6 100644
57978--- a/fs/notify/notification.c
57979+++ b/fs/notify/notification.c
57980@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
57981 * get set to 0 so it will never get 'freed'
57982 */
57983 static struct fsnotify_event *q_overflow_event;
57984-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
57985+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
57986
57987 /**
57988 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
57989@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
57990 */
57991 u32 fsnotify_get_cookie(void)
57992 {
57993- return atomic_inc_return(&fsnotify_sync_cookie);
57994+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
57995 }
57996 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
57997
57998diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
57999index aa411c3..c260a84 100644
58000--- a/fs/ntfs/dir.c
58001+++ b/fs/ntfs/dir.c
58002@@ -1329,7 +1329,7 @@ find_next_index_buffer:
58003 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
58004 ~(s64)(ndir->itype.index.block_size - 1)));
58005 /* Bounds checks. */
58006- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
58007+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
58008 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
58009 "inode 0x%lx or driver bug.", vdir->i_ino);
58010 goto err_out;
58011diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
e2b79cd1 58012index c5670b8..2b43d9b 100644
bb5f0bf8
AF
58013--- a/fs/ntfs/file.c
58014+++ b/fs/ntfs/file.c
e2b79cd1
AF
58015@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
58016 char *addr;
58017 size_t total = 0;
58018 unsigned len;
58019- int left;
58020+ unsigned left;
58021
58022 do {
58023 len = PAGE_CACHE_SIZE - ofs;
bb5f0bf8
AF
58024@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
58025 #endif /* NTFS_RW */
58026 };
58027
58028-const struct file_operations ntfs_empty_file_ops = {};
58029+const struct file_operations ntfs_empty_file_ops __read_only;
58030
58031-const struct inode_operations ntfs_empty_inode_ops = {};
58032+const struct inode_operations ntfs_empty_inode_ops __read_only;
58033diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
58034index 20dfec7..e238cb7 100644
58035--- a/fs/ocfs2/aops.c
58036+++ b/fs/ocfs2/aops.c
58037@@ -1756,7 +1756,7 @@ try_again:
58038 goto out;
58039 } else if (ret == 1) {
58040 clusters_need = wc->w_clen;
58041- ret = ocfs2_refcount_cow(inode, filp, di_bh,
58042+ ret = ocfs2_refcount_cow(inode, di_bh,
58043 wc->w_cpos, wc->w_clen, UINT_MAX);
58044 if (ret) {
58045 mlog_errno(ret);
58046diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
58047index ff54014..ff125fd 100644
58048--- a/fs/ocfs2/file.c
58049+++ b/fs/ocfs2/file.c
58050@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
58051 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
58052 goto out;
58053
58054- return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
58055+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
58056
58057 out:
58058 return status;
58059@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
58060 zero_clusters = last_cpos - zero_cpos;
58061
58062 if (needs_cow) {
58063- rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
58064+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
58065 zero_clusters, UINT_MAX);
58066 if (rc) {
58067 mlog_errno(rc);
58068@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
58069
58070 *meta_level = 1;
58071
58072- ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
58073+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
58074 if (ret)
58075 mlog_errno(ret);
58076 out:
58077diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
58078index aebeacd..0dcdd26 100644
58079--- a/fs/ocfs2/localalloc.c
58080+++ b/fs/ocfs2/localalloc.c
58081@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
58082 goto bail;
58083 }
58084
58085- atomic_inc(&osb->alloc_stats.moves);
58086+ atomic_inc_unchecked(&osb->alloc_stats.moves);
58087
58088 bail:
58089 if (handle)
58090diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
58091index f1fc172..452068b 100644
58092--- a/fs/ocfs2/move_extents.c
58093+++ b/fs/ocfs2/move_extents.c
58094@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
58095 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
58096 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
58097
58098- ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
58099+ ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
58100 p_cpos, new_p_cpos, len);
58101 if (ret) {
58102 mlog_errno(ret);
58103diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
58104index d355e6e..578d905 100644
58105--- a/fs/ocfs2/ocfs2.h
58106+++ b/fs/ocfs2/ocfs2.h
58107@@ -235,11 +235,11 @@ enum ocfs2_vol_state
58108
58109 struct ocfs2_alloc_stats
58110 {
58111- atomic_t moves;
58112- atomic_t local_data;
58113- atomic_t bitmap_data;
58114- atomic_t bg_allocs;
58115- atomic_t bg_extends;
58116+ atomic_unchecked_t moves;
58117+ atomic_unchecked_t local_data;
58118+ atomic_unchecked_t bitmap_data;
58119+ atomic_unchecked_t bg_allocs;
58120+ atomic_unchecked_t bg_extends;
58121 };
58122
58123 enum ocfs2_local_alloc_state
58124diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
58125index 998b17e..aefe414 100644
58126--- a/fs/ocfs2/refcounttree.c
58127+++ b/fs/ocfs2/refcounttree.c
58128@@ -49,7 +49,6 @@
58129
58130 struct ocfs2_cow_context {
58131 struct inode *inode;
58132- struct file *file;
58133 u32 cow_start;
58134 u32 cow_len;
58135 struct ocfs2_extent_tree data_et;
58136@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
58137 u32 *num_clusters,
58138 unsigned int *extent_flags);
58139 int (*cow_duplicate_clusters)(handle_t *handle,
58140- struct file *file,
58141+ struct inode *inode,
58142 u32 cpos, u32 old_cluster,
58143 u32 new_cluster, u32 new_len);
58144 };
58145@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
58146 }
58147
58148 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58149- struct file *file,
58150+ struct inode *inode,
58151 u32 cpos, u32 old_cluster,
58152 u32 new_cluster, u32 new_len)
58153 {
58154 int ret = 0, partial;
58155- struct inode *inode = file_inode(file);
58156- struct ocfs2_caching_info *ci = INODE_CACHE(inode);
58157- struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
58158+ struct super_block *sb = inode->i_sb;
58159 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
58160 struct page *page;
58161 pgoff_t page_index;
58162@@ -2973,13 +2970,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58163 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
58164 BUG_ON(PageDirty(page));
58165
58166- if (PageReadahead(page)) {
58167- page_cache_async_readahead(mapping,
58168- &file->f_ra, file,
58169- page, page_index,
58170- readahead_pages);
58171- }
58172-
58173 if (!PageUptodate(page)) {
58174 ret = block_read_full_page(page, ocfs2_get_block);
58175 if (ret) {
58176@@ -2999,7 +2989,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58177 }
58178 }
58179
58180- ocfs2_map_and_dirty_page(inode, handle, from, to,
58181+ ocfs2_map_and_dirty_page(inode,
58182+ handle, from, to,
58183 page, 0, &new_block);
58184 mark_page_accessed(page);
58185 unlock:
58186@@ -3015,12 +3006,11 @@ unlock:
58187 }
58188
58189 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
58190- struct file *file,
58191+ struct inode *inode,
58192 u32 cpos, u32 old_cluster,
58193 u32 new_cluster, u32 new_len)
58194 {
58195 int ret = 0;
58196- struct inode *inode = file_inode(file);
58197 struct super_block *sb = inode->i_sb;
58198 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
58199 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
58200@@ -3145,7 +3135,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
58201
58202 /*If the old clusters is unwritten, no need to duplicate. */
58203 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
58204- ret = context->cow_duplicate_clusters(handle, context->file,
58205+ ret = context->cow_duplicate_clusters(handle, context->inode,
58206 cpos, old, new, len);
58207 if (ret) {
58208 mlog_errno(ret);
58209@@ -3423,35 +3413,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
58210 return ret;
58211 }
58212
58213-static void ocfs2_readahead_for_cow(struct inode *inode,
58214- struct file *file,
58215- u32 start, u32 len)
58216-{
58217- struct address_space *mapping;
58218- pgoff_t index;
58219- unsigned long num_pages;
58220- int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
58221-
58222- if (!file)
58223- return;
58224-
58225- mapping = file->f_mapping;
58226- num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
58227- if (!num_pages)
58228- num_pages = 1;
58229-
58230- index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
58231- page_cache_sync_readahead(mapping, &file->f_ra, file,
58232- index, num_pages);
58233-}
58234-
58235 /*
58236 * Starting at cpos, try to CoW write_len clusters. Don't CoW
58237 * past max_cpos. This will stop when it runs into a hole or an
58238 * unrefcounted extent.
58239 */
58240 static int ocfs2_refcount_cow_hunk(struct inode *inode,
58241- struct file *file,
58242 struct buffer_head *di_bh,
58243 u32 cpos, u32 write_len, u32 max_cpos)
58244 {
58245@@ -3480,8 +3447,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
58246
58247 BUG_ON(cow_len == 0);
58248
58249- ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
58250-
58251 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
58252 if (!context) {
58253 ret = -ENOMEM;
58254@@ -3503,7 +3468,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
58255 context->ref_root_bh = ref_root_bh;
58256 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
58257 context->get_clusters = ocfs2_di_get_clusters;
58258- context->file = file;
58259
58260 ocfs2_init_dinode_extent_tree(&context->data_et,
58261 INODE_CACHE(inode), di_bh);
58262@@ -3532,7 +3496,6 @@ out:
58263 * clusters between cpos and cpos+write_len are safe to modify.
58264 */
58265 int ocfs2_refcount_cow(struct inode *inode,
58266- struct file *file,
58267 struct buffer_head *di_bh,
58268 u32 cpos, u32 write_len, u32 max_cpos)
58269 {
58270@@ -3552,7 +3515,7 @@ int ocfs2_refcount_cow(struct inode *inode,
58271 num_clusters = write_len;
58272
58273 if (ext_flags & OCFS2_EXT_REFCOUNTED) {
58274- ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
58275+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
58276 num_clusters, max_cpos);
58277 if (ret) {
58278 mlog_errno(ret);
58279diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
58280index 7754608..6422bbcdb 100644
58281--- a/fs/ocfs2/refcounttree.h
58282+++ b/fs/ocfs2/refcounttree.h
58283@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
58284 int *credits,
58285 int *ref_blocks);
58286 int ocfs2_refcount_cow(struct inode *inode,
58287- struct file *filep, struct buffer_head *di_bh,
58288+ struct buffer_head *di_bh,
58289 u32 cpos, u32 write_len, u32 max_cpos);
58290
58291 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
58292@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
58293 u32 cpos, u32 write_len,
58294 struct ocfs2_post_refcount *post);
58295 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58296- struct file *file,
58297+ struct inode *inode,
58298 u32 cpos, u32 old_cluster,
58299 u32 new_cluster, u32 new_len);
58300 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
58301- struct file *file,
58302+ struct inode *inode,
58303 u32 cpos, u32 old_cluster,
58304 u32 new_cluster, u32 new_len);
58305 int ocfs2_cow_sync_writeback(struct super_block *sb,
58306diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
58307index b7e74b5..19c6536 100644
58308--- a/fs/ocfs2/suballoc.c
58309+++ b/fs/ocfs2/suballoc.c
58310@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
58311 mlog_errno(status);
58312 goto bail;
58313 }
58314- atomic_inc(&osb->alloc_stats.bg_extends);
58315+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
58316
58317 /* You should never ask for this much metadata */
58318 BUG_ON(bits_wanted >
58319@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
58320 mlog_errno(status);
58321 goto bail;
58322 }
58323- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58324+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58325
58326 *suballoc_loc = res.sr_bg_blkno;
58327 *suballoc_bit_start = res.sr_bit_offset;
58328@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
58329 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
58330 res->sr_bits);
58331
58332- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58333+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58334
58335 BUG_ON(res->sr_bits != 1);
58336
58337@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
58338 mlog_errno(status);
58339 goto bail;
58340 }
58341- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58342+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58343
58344 BUG_ON(res.sr_bits != 1);
58345
58346@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58347 cluster_start,
58348 num_clusters);
58349 if (!status)
58350- atomic_inc(&osb->alloc_stats.local_data);
58351+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
58352 } else {
58353 if (min_clusters > (osb->bitmap_cpg - 1)) {
58354 /* The only paths asking for contiguousness
58355@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58356 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
58357 res.sr_bg_blkno,
58358 res.sr_bit_offset);
58359- atomic_inc(&osb->alloc_stats.bitmap_data);
58360+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
58361 *num_clusters = res.sr_bits;
58362 }
58363 }
58364diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
58365index 01b8516..579c4df 100644
58366--- a/fs/ocfs2/super.c
58367+++ b/fs/ocfs2/super.c
58368@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
58369 "%10s => GlobalAllocs: %d LocalAllocs: %d "
58370 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
58371 "Stats",
58372- atomic_read(&osb->alloc_stats.bitmap_data),
58373- atomic_read(&osb->alloc_stats.local_data),
58374- atomic_read(&osb->alloc_stats.bg_allocs),
58375- atomic_read(&osb->alloc_stats.moves),
58376- atomic_read(&osb->alloc_stats.bg_extends));
58377+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
58378+ atomic_read_unchecked(&osb->alloc_stats.local_data),
58379+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
58380+ atomic_read_unchecked(&osb->alloc_stats.moves),
58381+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
58382
58383 out += snprintf(buf + out, len - out,
58384 "%10s => State: %u Descriptor: %llu Size: %u bits "
58385@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
58386 spin_lock_init(&osb->osb_xattr_lock);
58387 ocfs2_init_steal_slots(osb);
58388
58389- atomic_set(&osb->alloc_stats.moves, 0);
58390- atomic_set(&osb->alloc_stats.local_data, 0);
58391- atomic_set(&osb->alloc_stats.bitmap_data, 0);
58392- atomic_set(&osb->alloc_stats.bg_allocs, 0);
58393- atomic_set(&osb->alloc_stats.bg_extends, 0);
58394+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
58395+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
58396+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
58397+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
58398+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
58399
58400 /* Copy the blockcheck stats from the superblock probe */
58401 osb->osb_ecc_stats = *stats;
58402diff --git a/fs/open.c b/fs/open.c
58403index 8c74100..4239c48 100644
58404--- a/fs/open.c
58405+++ b/fs/open.c
58406@@ -32,6 +32,8 @@
58407 #include <linux/dnotify.h>
58408 #include <linux/compat.h>
58409
58410+#define CREATE_TRACE_POINTS
58411+#include <trace/events/fs.h>
58412 #include "internal.h"
58413
58414 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
58415@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
58416 error = locks_verify_truncate(inode, NULL, length);
58417 if (!error)
58418 error = security_path_truncate(path);
58419+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
58420+ error = -EACCES;
58421 if (!error)
58422 error = do_truncate(path->dentry, length, 0, NULL);
58423
58424@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
58425 error = locks_verify_truncate(inode, f.file, length);
58426 if (!error)
58427 error = security_path_truncate(&f.file->f_path);
58428+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
58429+ error = -EACCES;
58430 if (!error)
58431 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
58432 sb_end_write(inode->i_sb);
58433@@ -360,6 +366,9 @@ retry:
58434 if (__mnt_is_readonly(path.mnt))
58435 res = -EROFS;
58436
58437+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
58438+ res = -EACCES;
58439+
58440 out_path_release:
58441 path_put(&path);
58442 if (retry_estale(res, lookup_flags)) {
58443@@ -391,6 +400,8 @@ retry:
58444 if (error)
58445 goto dput_and_out;
58446
58447+ gr_log_chdir(path.dentry, path.mnt);
58448+
58449 set_fs_pwd(current->fs, &path);
58450
58451 dput_and_out:
58452@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
58453 goto out_putf;
58454
58455 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
58456+
58457+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
58458+ error = -EPERM;
58459+
58460+ if (!error)
58461+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
58462+
58463 if (!error)
58464 set_fs_pwd(current->fs, &f.file->f_path);
58465 out_putf:
58466@@ -449,7 +467,13 @@ retry:
58467 if (error)
58468 goto dput_and_out;
58469
58470+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
58471+ goto dput_and_out;
58472+
58473 set_fs_root(current->fs, &path);
58474+
58475+ gr_handle_chroot_chdir(&path);
58476+
58477 error = 0;
58478 dput_and_out:
58479 path_put(&path);
58480@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
58481 if (error)
58482 return error;
58483 mutex_lock(&inode->i_mutex);
58484+
58485+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
58486+ error = -EACCES;
58487+ goto out_unlock;
58488+ }
58489+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
58490+ error = -EACCES;
58491+ goto out_unlock;
58492+ }
58493+
58494 error = security_path_chmod(path, mode);
58495 if (error)
58496 goto out_unlock;
58497@@ -531,6 +565,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
58498 uid = make_kuid(current_user_ns(), user);
58499 gid = make_kgid(current_user_ns(), group);
58500
58501+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
58502+ return -EACCES;
58503+
58504 newattrs.ia_valid = ATTR_CTIME;
58505 if (user != (uid_t) -1) {
58506 if (!uid_valid(uid))
58507@@ -946,6 +983,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
58508 } else {
58509 fsnotify_open(f);
58510 fd_install(fd, f);
58511+ trace_do_sys_open(tmp->name, flags, mode);
58512 }
58513 }
58514 putname(tmp);
58515diff --git a/fs/pipe.c b/fs/pipe.c
58516index d2c45e1..009fe1c 100644
58517--- a/fs/pipe.c
58518+++ b/fs/pipe.c
58519@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
58520
58521 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
58522 {
58523- if (pipe->files)
58524+ if (atomic_read(&pipe->files))
58525 mutex_lock_nested(&pipe->mutex, subclass);
58526 }
58527
58528@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
58529
58530 void pipe_unlock(struct pipe_inode_info *pipe)
58531 {
58532- if (pipe->files)
58533+ if (atomic_read(&pipe->files))
58534 mutex_unlock(&pipe->mutex);
58535 }
58536 EXPORT_SYMBOL(pipe_unlock);
58537@@ -449,9 +449,9 @@ redo:
58538 }
58539 if (bufs) /* More to do? */
58540 continue;
58541- if (!pipe->writers)
58542+ if (!atomic_read(&pipe->writers))
58543 break;
58544- if (!pipe->waiting_writers) {
58545+ if (!atomic_read(&pipe->waiting_writers)) {
58546 /* syscall merging: Usually we must not sleep
58547 * if O_NONBLOCK is set, or if we got some data.
58548 * But if a writer sleeps in kernel space, then
58549@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
58550 ret = 0;
58551 __pipe_lock(pipe);
58552
58553- if (!pipe->readers) {
58554+ if (!atomic_read(&pipe->readers)) {
58555 send_sig(SIGPIPE, current, 0);
58556 ret = -EPIPE;
58557 goto out;
58558@@ -562,7 +562,7 @@ redo1:
58559 for (;;) {
58560 int bufs;
58561
58562- if (!pipe->readers) {
58563+ if (!atomic_read(&pipe->readers)) {
58564 send_sig(SIGPIPE, current, 0);
58565 if (!ret)
58566 ret = -EPIPE;
58567@@ -653,9 +653,9 @@ redo2:
58568 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
58569 do_wakeup = 0;
58570 }
58571- pipe->waiting_writers++;
58572+ atomic_inc(&pipe->waiting_writers);
58573 pipe_wait(pipe);
58574- pipe->waiting_writers--;
58575+ atomic_dec(&pipe->waiting_writers);
58576 }
58577 out:
58578 __pipe_unlock(pipe);
58579@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
58580 mask = 0;
58581 if (filp->f_mode & FMODE_READ) {
58582 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
58583- if (!pipe->writers && filp->f_version != pipe->w_counter)
58584+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
58585 mask |= POLLHUP;
58586 }
58587
58588@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
58589 * Most Unices do not set POLLERR for FIFOs but on Linux they
58590 * behave exactly like pipes for poll().
58591 */
58592- if (!pipe->readers)
58593+ if (!atomic_read(&pipe->readers))
58594 mask |= POLLERR;
58595 }
58596
58597@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
58598
58599 __pipe_lock(pipe);
58600 if (file->f_mode & FMODE_READ)
58601- pipe->readers--;
58602+ atomic_dec(&pipe->readers);
58603 if (file->f_mode & FMODE_WRITE)
58604- pipe->writers--;
58605+ atomic_dec(&pipe->writers);
58606
58607- if (pipe->readers || pipe->writers) {
58608+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
58609 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
58610 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
58611 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
58612 }
58613 spin_lock(&inode->i_lock);
58614- if (!--pipe->files) {
58615+ if (atomic_dec_and_test(&pipe->files)) {
58616 inode->i_pipe = NULL;
58617 kill = 1;
58618 }
58619@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
58620 kfree(pipe);
58621 }
58622
58623-static struct vfsmount *pipe_mnt __read_mostly;
58624+struct vfsmount *pipe_mnt __read_mostly;
58625
58626 /*
58627 * pipefs_dname() is called from d_path().
58628@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
58629 goto fail_iput;
58630
58631 inode->i_pipe = pipe;
58632- pipe->files = 2;
58633- pipe->readers = pipe->writers = 1;
58634+ atomic_set(&pipe->files, 2);
58635+ atomic_set(&pipe->readers, 1);
58636+ atomic_set(&pipe->writers, 1);
58637 inode->i_fop = &pipefifo_fops;
58638
58639 /*
58640@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
58641 spin_lock(&inode->i_lock);
58642 if (inode->i_pipe) {
58643 pipe = inode->i_pipe;
58644- pipe->files++;
58645+ atomic_inc(&pipe->files);
58646 spin_unlock(&inode->i_lock);
58647 } else {
58648 spin_unlock(&inode->i_lock);
58649 pipe = alloc_pipe_info();
58650 if (!pipe)
58651 return -ENOMEM;
58652- pipe->files = 1;
58653+ atomic_set(&pipe->files, 1);
58654 spin_lock(&inode->i_lock);
58655 if (unlikely(inode->i_pipe)) {
58656- inode->i_pipe->files++;
58657+ atomic_inc(&inode->i_pipe->files);
58658 spin_unlock(&inode->i_lock);
58659 free_pipe_info(pipe);
58660 pipe = inode->i_pipe;
58661@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
58662 * opened, even when there is no process writing the FIFO.
58663 */
58664 pipe->r_counter++;
58665- if (pipe->readers++ == 0)
58666+ if (atomic_inc_return(&pipe->readers) == 1)
58667 wake_up_partner(pipe);
58668
58669- if (!is_pipe && !pipe->writers) {
58670+ if (!is_pipe && !atomic_read(&pipe->writers)) {
58671 if ((filp->f_flags & O_NONBLOCK)) {
58672 /* suppress POLLHUP until we have
58673 * seen a writer */
58674@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
58675 * errno=ENXIO when there is no process reading the FIFO.
58676 */
58677 ret = -ENXIO;
58678- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
58679+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
58680 goto err;
58681
58682 pipe->w_counter++;
58683- if (!pipe->writers++)
58684+ if (atomic_inc_return(&pipe->writers) == 1)
58685 wake_up_partner(pipe);
58686
58687- if (!is_pipe && !pipe->readers) {
58688+ if (!is_pipe && !atomic_read(&pipe->readers)) {
58689 if (wait_for_partner(pipe, &pipe->r_counter))
58690 goto err_wr;
58691 }
58692@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
58693 * the process can at least talk to itself.
58694 */
58695
58696- pipe->readers++;
58697- pipe->writers++;
58698+ atomic_inc(&pipe->readers);
58699+ atomic_inc(&pipe->writers);
58700 pipe->r_counter++;
58701 pipe->w_counter++;
58702- if (pipe->readers == 1 || pipe->writers == 1)
58703+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
58704 wake_up_partner(pipe);
58705 break;
58706
58707@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
58708 return 0;
58709
58710 err_rd:
58711- if (!--pipe->readers)
58712+ if (atomic_dec_and_test(&pipe->readers))
58713 wake_up_interruptible(&pipe->wait);
58714 ret = -ERESTARTSYS;
58715 goto err;
58716
58717 err_wr:
58718- if (!--pipe->writers)
58719+ if (atomic_dec_and_test(&pipe->writers))
58720 wake_up_interruptible(&pipe->wait);
58721 ret = -ERESTARTSYS;
58722 goto err;
58723
58724 err:
58725 spin_lock(&inode->i_lock);
58726- if (!--pipe->files) {
58727+ if (atomic_dec_and_test(&pipe->files)) {
58728 inode->i_pipe = NULL;
58729 kill = 1;
58730 }
58731diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
58732index 15af622..0e9f4467 100644
58733--- a/fs/proc/Kconfig
58734+++ b/fs/proc/Kconfig
58735@@ -30,12 +30,12 @@ config PROC_FS
58736
58737 config PROC_KCORE
58738 bool "/proc/kcore support" if !ARM
58739- depends on PROC_FS && MMU
58740+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
58741
58742 config PROC_VMCORE
58743 bool "/proc/vmcore support"
58744- depends on PROC_FS && CRASH_DUMP
58745- default y
58746+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
58747+ default n
58748 help
58749 Exports the dump image of crashed kernel in ELF format.
58750
58751@@ -59,8 +59,8 @@ config PROC_SYSCTL
58752 limited in memory.
58753
58754 config PROC_PAGE_MONITOR
58755- default y
58756- depends on PROC_FS && MMU
58757+ default n
58758+ depends on PROC_FS && MMU && !GRKERNSEC
58759 bool "Enable /proc page monitoring" if EXPERT
58760 help
58761 Various /proc files exist to monitor process memory utilization:
58762diff --git a/fs/proc/array.c b/fs/proc/array.c
58763index cbd0f1b..adec3f0 100644
58764--- a/fs/proc/array.c
58765+++ b/fs/proc/array.c
58766@@ -60,6 +60,7 @@
58767 #include <linux/tty.h>
58768 #include <linux/string.h>
58769 #include <linux/mman.h>
58770+#include <linux/grsecurity.h>
58771 #include <linux/proc_fs.h>
58772 #include <linux/ioport.h>
58773 #include <linux/uaccess.h>
58774@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
58775 seq_putc(m, '\n');
58776 }
58777
58778+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58779+static inline void task_pax(struct seq_file *m, struct task_struct *p)
58780+{
58781+ if (p->mm)
58782+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
58783+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
58784+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
58785+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
58786+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
58787+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
58788+ else
58789+ seq_printf(m, "PaX:\t-----\n");
58790+}
58791+#endif
58792+
58793 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
58794 struct pid *pid, struct task_struct *task)
58795 {
58796@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
58797 task_cpus_allowed(m, task);
58798 cpuset_task_status_allowed(m, task);
58799 task_context_switch_counts(m, task);
58800+
58801+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58802+ task_pax(m, task);
58803+#endif
58804+
58805+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
58806+ task_grsec_rbac(m, task);
58807+#endif
58808+
58809 return 0;
58810 }
58811
58812+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58813+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
58814+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
58815+ _mm->pax_flags & MF_PAX_SEGMEXEC))
58816+#endif
58817+
58818 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58819 struct pid *pid, struct task_struct *task, int whole)
58820 {
58821@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58822 char tcomm[sizeof(task->comm)];
58823 unsigned long flags;
58824
58825+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58826+ if (current->exec_id != m->exec_id) {
58827+ gr_log_badprocpid("stat");
58828+ return 0;
58829+ }
58830+#endif
58831+
58832 state = *get_task_state(task);
58833 vsize = eip = esp = 0;
58834 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
58835@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58836 gtime = task_gtime(task);
58837 }
58838
58839+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58840+ if (PAX_RAND_FLAGS(mm)) {
58841+ eip = 0;
58842+ esp = 0;
58843+ wchan = 0;
58844+ }
58845+#endif
58846+#ifdef CONFIG_GRKERNSEC_HIDESYM
58847+ wchan = 0;
58848+ eip =0;
58849+ esp =0;
58850+#endif
58851+
58852 /* scale priority and nice values from timeslices to -20..20 */
58853 /* to make it look like a "normal" Unix priority/nice value */
58854 priority = task_prio(task);
58855@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58856 seq_put_decimal_ull(m, ' ', vsize);
58857 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
58858 seq_put_decimal_ull(m, ' ', rsslim);
58859+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58860+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
58861+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
58862+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
58863+#else
58864 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
58865 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
58866 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
58867+#endif
58868 seq_put_decimal_ull(m, ' ', esp);
58869 seq_put_decimal_ull(m, ' ', eip);
58870 /* The signal information here is obsolete.
58871@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58872 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
58873 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
58874
58875- if (mm && permitted) {
58876+ if (mm && permitted
58877+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58878+ && !PAX_RAND_FLAGS(mm)
58879+#endif
58880+ ) {
58881 seq_put_decimal_ull(m, ' ', mm->start_data);
58882 seq_put_decimal_ull(m, ' ', mm->end_data);
58883 seq_put_decimal_ull(m, ' ', mm->start_brk);
58884@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
58885 struct pid *pid, struct task_struct *task)
58886 {
58887 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
58888- struct mm_struct *mm = get_task_mm(task);
58889+ struct mm_struct *mm;
58890
58891+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58892+ if (current->exec_id != m->exec_id) {
58893+ gr_log_badprocpid("statm");
58894+ return 0;
58895+ }
58896+#endif
58897+ mm = get_task_mm(task);
58898 if (mm) {
58899 size = task_statm(mm, &shared, &text, &data, &resident);
58900 mmput(mm);
58901@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
58902 return 0;
58903 }
58904
58905+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
58906+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
58907+{
58908+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
58909+}
58910+#endif
58911+
58912 #ifdef CONFIG_CHECKPOINT_RESTORE
58913 static struct pid *
58914 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
58915diff --git a/fs/proc/base.c b/fs/proc/base.c
58916index c3834da..b402b2b 100644
58917--- a/fs/proc/base.c
58918+++ b/fs/proc/base.c
58919@@ -113,6 +113,14 @@ struct pid_entry {
58920 union proc_op op;
58921 };
58922
58923+struct getdents_callback {
58924+ struct linux_dirent __user * current_dir;
58925+ struct linux_dirent __user * previous;
58926+ struct file * file;
58927+ int count;
58928+ int error;
58929+};
58930+
58931 #define NOD(NAME, MODE, IOP, FOP, OP) { \
58932 .name = (NAME), \
58933 .len = sizeof(NAME) - 1, \
58934@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
58935 if (!mm->arg_end)
58936 goto out_mm; /* Shh! No looking before we're done */
58937
58938+ if (gr_acl_handle_procpidmem(task))
58939+ goto out_mm;
58940+
58941 len = mm->arg_end - mm->arg_start;
58942
58943 if (len > PAGE_SIZE)
58944@@ -237,12 +248,28 @@ out:
58945 return res;
58946 }
58947
58948+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58949+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
58950+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
58951+ _mm->pax_flags & MF_PAX_SEGMEXEC))
58952+#endif
58953+
58954 static int proc_pid_auxv(struct task_struct *task, char *buffer)
58955 {
58956 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
58957 int res = PTR_ERR(mm);
58958 if (mm && !IS_ERR(mm)) {
58959 unsigned int nwords = 0;
58960+
58961+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58962+ /* allow if we're currently ptracing this task */
58963+ if (PAX_RAND_FLAGS(mm) &&
58964+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
58965+ mmput(mm);
58966+ return 0;
58967+ }
58968+#endif
58969+
58970 do {
58971 nwords += 2;
58972 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
58973@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
58974 }
58975
58976
58977-#ifdef CONFIG_KALLSYMS
58978+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58979 /*
58980 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
58981 * Returns the resolved symbol. If that fails, simply return the address.
58982@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
58983 mutex_unlock(&task->signal->cred_guard_mutex);
58984 }
58985
58986-#ifdef CONFIG_STACKTRACE
58987+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58988
58989 #define MAX_STACK_TRACE_DEPTH 64
58990
58991@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
58992 return count;
58993 }
58994
58995-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
58996+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
58997 static int proc_pid_syscall(struct task_struct *task, char *buffer)
58998 {
58999 long nr;
59000@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
59001 /************************************************************************/
59002
59003 /* permission checks */
59004-static int proc_fd_access_allowed(struct inode *inode)
59005+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
59006 {
59007 struct task_struct *task;
59008 int allowed = 0;
59009@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
59010 */
59011 task = get_proc_task(inode);
59012 if (task) {
59013- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
59014+ if (log)
59015+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
59016+ else
59017+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
59018 put_task_struct(task);
59019 }
59020 return allowed;
59021@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
59022 struct task_struct *task,
59023 int hide_pid_min)
59024 {
59025+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59026+ return false;
59027+
59028+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59029+ rcu_read_lock();
59030+ {
59031+ const struct cred *tmpcred = current_cred();
59032+ const struct cred *cred = __task_cred(task);
59033+
59034+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
59035+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59036+ || in_group_p(grsec_proc_gid)
59037+#endif
59038+ ) {
59039+ rcu_read_unlock();
59040+ return true;
59041+ }
59042+ }
59043+ rcu_read_unlock();
59044+
59045+ if (!pid->hide_pid)
59046+ return false;
59047+#endif
59048+
59049 if (pid->hide_pid < hide_pid_min)
59050 return true;
59051 if (in_group_p(pid->pid_gid))
59052 return true;
59053+
59054 return ptrace_may_access(task, PTRACE_MODE_READ);
59055 }
59056
59057@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
59058 put_task_struct(task);
59059
59060 if (!has_perms) {
59061+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59062+ {
59063+#else
59064 if (pid->hide_pid == 2) {
59065+#endif
59066 /*
59067 * Let's make getdents(), stat(), and open()
59068 * consistent with each other. If a process
59069@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
59070 if (!task)
59071 return -ESRCH;
59072
59073+ if (gr_acl_handle_procpidmem(task)) {
59074+ put_task_struct(task);
59075+ return -EPERM;
59076+ }
59077+
59078 mm = mm_access(task, mode);
59079 put_task_struct(task);
59080
59081@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
59082
59083 file->private_data = mm;
59084
59085+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59086+ file->f_version = current->exec_id;
59087+#endif
59088+
59089 return 0;
59090 }
59091
59092@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
59093 ssize_t copied;
59094 char *page;
59095
59096+#ifdef CONFIG_GRKERNSEC
59097+ if (write)
59098+ return -EPERM;
59099+#endif
59100+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59101+ if (file->f_version != current->exec_id) {
59102+ gr_log_badprocpid("mem");
59103+ return 0;
59104+ }
59105+#endif
59106+
59107 if (!mm)
59108 return 0;
59109
59110@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
59111 goto free;
59112
59113 while (count > 0) {
59114- int this_len = min_t(int, count, PAGE_SIZE);
59115+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
59116
59117 if (write && copy_from_user(page, buf, this_len)) {
59118 copied = -EFAULT;
59119@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
59120 if (!mm)
59121 return 0;
59122
59123+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59124+ if (file->f_version != current->exec_id) {
59125+ gr_log_badprocpid("environ");
59126+ return 0;
59127+ }
59128+#endif
59129+
59130 page = (char *)__get_free_page(GFP_TEMPORARY);
59131 if (!page)
59132 return -ENOMEM;
59133@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
59134 goto free;
59135 while (count > 0) {
59136 size_t this_len, max_len;
59137- int retval;
59138+ ssize_t retval;
59139
59140 if (src >= (mm->env_end - mm->env_start))
59141 break;
59142@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
59143 int error = -EACCES;
59144
59145 /* Are we allowed to snoop on the tasks file descriptors? */
59146- if (!proc_fd_access_allowed(inode))
59147+ if (!proc_fd_access_allowed(inode, 0))
59148 goto out;
59149
59150 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
59151@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
59152 struct path path;
59153
59154 /* Are we allowed to snoop on the tasks file descriptors? */
59155- if (!proc_fd_access_allowed(inode))
59156- goto out;
59157+ /* logging this is needed for learning on chromium to work properly,
59158+ but we don't want to flood the logs from 'ps' which does a readlink
59159+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
59160+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
59161+ */
59162+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
59163+ if (!proc_fd_access_allowed(inode,0))
59164+ goto out;
59165+ } else {
59166+ if (!proc_fd_access_allowed(inode,1))
59167+ goto out;
59168+ }
59169
59170 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
59171 if (error)
59172@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
59173 rcu_read_lock();
59174 cred = __task_cred(task);
59175 inode->i_uid = cred->euid;
59176+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59177+ inode->i_gid = grsec_proc_gid;
59178+#else
59179 inode->i_gid = cred->egid;
59180+#endif
59181 rcu_read_unlock();
59182 }
59183 security_task_to_inode(task, inode);
59184@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
59185 return -ENOENT;
59186 }
59187 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
59188+#ifdef CONFIG_GRKERNSEC_PROC_USER
59189+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
59190+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59191+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
59192+#endif
59193 task_dumpable(task)) {
59194 cred = __task_cred(task);
59195 stat->uid = cred->euid;
59196+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59197+ stat->gid = grsec_proc_gid;
59198+#else
59199 stat->gid = cred->egid;
59200+#endif
59201 }
59202 }
59203 rcu_read_unlock();
59204@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
59205
59206 if (task) {
59207 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
59208+#ifdef CONFIG_GRKERNSEC_PROC_USER
59209+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
59210+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59211+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
59212+#endif
59213 task_dumpable(task)) {
59214 rcu_read_lock();
59215 cred = __task_cred(task);
59216 inode->i_uid = cred->euid;
59217+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59218+ inode->i_gid = grsec_proc_gid;
59219+#else
59220 inode->i_gid = cred->egid;
59221+#endif
59222 rcu_read_unlock();
59223 } else {
59224 inode->i_uid = GLOBAL_ROOT_UID;
59225@@ -2196,6 +2314,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
59226 if (!task)
59227 goto out_no_task;
59228
59229+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59230+ goto out;
59231+
59232 /*
59233 * Yes, it does not scale. And it should not. Don't add
59234 * new entries into /proc/<tgid>/ without very good reasons.
59235@@ -2240,6 +2361,9 @@ static int proc_pident_readdir(struct file *filp,
59236 if (!task)
59237 goto out_no_task;
59238
59239+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59240+ goto out;
59241+
59242 ret = 0;
59243 i = filp->f_pos;
59244 switch (i) {
59245@@ -2653,7 +2777,7 @@ static const struct pid_entry tgid_base_stuff[] = {
59246 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
59247 #endif
59248 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
59249-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
59250+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
59251 INF("syscall", S_IRUGO, proc_pid_syscall),
59252 #endif
59253 INF("cmdline", S_IRUGO, proc_pid_cmdline),
59254@@ -2678,10 +2802,10 @@ static const struct pid_entry tgid_base_stuff[] = {
59255 #ifdef CONFIG_SECURITY
59256 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
59257 #endif
59258-#ifdef CONFIG_KALLSYMS
59259+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59260 INF("wchan", S_IRUGO, proc_pid_wchan),
59261 #endif
59262-#ifdef CONFIG_STACKTRACE
59263+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59264 ONE("stack", S_IRUGO, proc_pid_stack),
59265 #endif
59266 #ifdef CONFIG_SCHEDSTATS
59267@@ -2715,6 +2839,9 @@ static const struct pid_entry tgid_base_stuff[] = {
59268 #ifdef CONFIG_HARDWALL
59269 INF("hardwall", S_IRUGO, proc_pid_hardwall),
59270 #endif
59271+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
59272+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
59273+#endif
59274 #ifdef CONFIG_USER_NS
59275 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
59276 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
59277@@ -2847,7 +2974,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
59278 if (!inode)
59279 goto out;
59280
59281+#ifdef CONFIG_GRKERNSEC_PROC_USER
59282+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
59283+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59284+ inode->i_gid = grsec_proc_gid;
59285+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
59286+#else
59287 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
59288+#endif
59289 inode->i_op = &proc_tgid_base_inode_operations;
59290 inode->i_fop = &proc_tgid_base_operations;
59291 inode->i_flags|=S_IMMUTABLE;
59292@@ -2885,7 +3019,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
59293 if (!task)
59294 goto out;
59295
59296+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59297+ goto out_put_task;
59298+
59299 result = proc_pid_instantiate(dir, dentry, task, NULL);
59300+out_put_task:
59301 put_task_struct(task);
59302 out:
59303 return result;
59304@@ -2948,6 +3086,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
59305 static int fake_filldir(void *buf, const char *name, int namelen,
59306 loff_t offset, u64 ino, unsigned d_type)
59307 {
59308+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
59309+ __buf->error = -EINVAL;
59310 return 0;
59311 }
59312
59313@@ -3007,7 +3147,7 @@ static const struct pid_entry tid_base_stuff[] = {
59314 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
59315 #endif
59316 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
59317-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
59318+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
59319 INF("syscall", S_IRUGO, proc_pid_syscall),
59320 #endif
59321 INF("cmdline", S_IRUGO, proc_pid_cmdline),
59322@@ -3034,10 +3174,10 @@ static const struct pid_entry tid_base_stuff[] = {
59323 #ifdef CONFIG_SECURITY
59324 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
59325 #endif
59326-#ifdef CONFIG_KALLSYMS
59327+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59328 INF("wchan", S_IRUGO, proc_pid_wchan),
59329 #endif
59330-#ifdef CONFIG_STACKTRACE
59331+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59332 ONE("stack", S_IRUGO, proc_pid_stack),
59333 #endif
59334 #ifdef CONFIG_SCHEDSTATS
59335diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
59336index 82676e3..5f8518a 100644
59337--- a/fs/proc/cmdline.c
59338+++ b/fs/proc/cmdline.c
59339@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
59340
59341 static int __init proc_cmdline_init(void)
59342 {
59343+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59344+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
59345+#else
59346 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
59347+#endif
59348 return 0;
59349 }
59350 module_init(proc_cmdline_init);
59351diff --git a/fs/proc/devices.c b/fs/proc/devices.c
59352index b143471..bb105e5 100644
59353--- a/fs/proc/devices.c
59354+++ b/fs/proc/devices.c
59355@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
59356
59357 static int __init proc_devices_init(void)
59358 {
59359+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59360+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
59361+#else
59362 proc_create("devices", 0, NULL, &proc_devinfo_operations);
59363+#endif
59364 return 0;
59365 }
59366 module_init(proc_devices_init);
59367diff --git a/fs/proc/fd.c b/fs/proc/fd.c
59368index d7a4a28..0201742 100644
59369--- a/fs/proc/fd.c
59370+++ b/fs/proc/fd.c
59371@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
59372 if (!task)
59373 return -ENOENT;
59374
59375- files = get_files_struct(task);
59376+ if (!gr_acl_handle_procpidmem(task))
59377+ files = get_files_struct(task);
59378 put_task_struct(task);
59379
59380 if (files) {
59381@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
59382 */
59383 int proc_fd_permission(struct inode *inode, int mask)
59384 {
59385+ struct task_struct *task;
59386 int rv = generic_permission(inode, mask);
59387- if (rv == 0)
59388- return 0;
59389+
59390 if (task_pid(current) == proc_pid(inode))
59391 rv = 0;
59392+
59393+ task = get_proc_task(inode);
59394+ if (task == NULL)
59395+ return rv;
59396+
59397+ if (gr_acl_handle_procpidmem(task))
59398+ rv = -EACCES;
59399+
59400+ put_task_struct(task);
59401+
59402 return rv;
59403 }
59404
59405diff --git a/fs/proc/inode.c b/fs/proc/inode.c
59406index 073aea6..0630370 100644
59407--- a/fs/proc/inode.c
59408+++ b/fs/proc/inode.c
59409@@ -23,11 +23,17 @@
59410 #include <linux/slab.h>
59411 #include <linux/mount.h>
59412 #include <linux/magic.h>
59413+#include <linux/grsecurity.h>
59414
59415 #include <asm/uaccess.h>
59416
59417 #include "internal.h"
59418
59419+#ifdef CONFIG_PROC_SYSCTL
59420+extern const struct inode_operations proc_sys_inode_operations;
59421+extern const struct inode_operations proc_sys_dir_operations;
59422+#endif
59423+
59424 static void proc_evict_inode(struct inode *inode)
59425 {
59426 struct proc_dir_entry *de;
59427@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
59428 ns = PROC_I(inode)->ns.ns;
59429 if (ns_ops && ns)
59430 ns_ops->put(ns);
59431+
59432+#ifdef CONFIG_PROC_SYSCTL
59433+ if (inode->i_op == &proc_sys_inode_operations ||
59434+ inode->i_op == &proc_sys_dir_operations)
59435+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
59436+#endif
59437+
59438 }
59439
59440 static struct kmem_cache * proc_inode_cachep;
59441@@ -385,7 +398,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
59442 if (de->mode) {
59443 inode->i_mode = de->mode;
59444 inode->i_uid = de->uid;
59445+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59446+ inode->i_gid = grsec_proc_gid;
59447+#else
59448 inode->i_gid = de->gid;
59449+#endif
59450 }
59451 if (de->size)
59452 inode->i_size = de->size;
59453diff --git a/fs/proc/internal.h b/fs/proc/internal.h
59454index d600fb0..3b495fe 100644
59455--- a/fs/proc/internal.h
59456+++ b/fs/proc/internal.h
59457@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
59458 struct pid *, struct task_struct *);
59459 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
59460 struct pid *, struct task_struct *);
59461+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
59462+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
59463+#endif
59464
59465 /*
59466 * base.c
59467diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
59468index 0a22194..a9fc8c1 100644
59469--- a/fs/proc/kcore.c
59470+++ b/fs/proc/kcore.c
59471@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59472 * the addresses in the elf_phdr on our list.
59473 */
59474 start = kc_offset_to_vaddr(*fpos - elf_buflen);
59475- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
59476+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
59477+ if (tsz > buflen)
59478 tsz = buflen;
59479-
59480+
59481 while (buflen) {
59482 struct kcore_list *m;
59483
59484@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59485 kfree(elf_buf);
59486 } else {
59487 if (kern_addr_valid(start)) {
59488- unsigned long n;
59489+ char *elf_buf;
59490+ mm_segment_t oldfs;
59491
59492- n = copy_to_user(buffer, (char *)start, tsz);
59493- /*
59494- * We cannot distinguish between fault on source
59495- * and fault on destination. When this happens
59496- * we clear too and hope it will trigger the
59497- * EFAULT again.
59498- */
59499- if (n) {
59500- if (clear_user(buffer + tsz - n,
59501- n))
59502+ elf_buf = kmalloc(tsz, GFP_KERNEL);
59503+ if (!elf_buf)
59504+ return -ENOMEM;
59505+ oldfs = get_fs();
59506+ set_fs(KERNEL_DS);
59507+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
59508+ set_fs(oldfs);
59509+ if (copy_to_user(buffer, elf_buf, tsz)) {
59510+ kfree(elf_buf);
59511 return -EFAULT;
59512+ }
59513 }
59514+ set_fs(oldfs);
59515+ kfree(elf_buf);
59516 } else {
59517 if (clear_user(buffer, tsz))
59518 return -EFAULT;
59519@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59520
59521 static int open_kcore(struct inode *inode, struct file *filp)
59522 {
59523+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59524+ return -EPERM;
59525+#endif
59526 if (!capable(CAP_SYS_RAWIO))
59527 return -EPERM;
59528 if (kcore_need_update)
59529diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
59530index 5aa847a..f77c8d4 100644
59531--- a/fs/proc/meminfo.c
59532+++ b/fs/proc/meminfo.c
59533@@ -159,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
59534 vmi.used >> 10,
59535 vmi.largest_chunk >> 10
59536 #ifdef CONFIG_MEMORY_FAILURE
59537- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
59538+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
59539 #endif
59540 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
59541 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
59542diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
59543index ccfd99b..1b7e255 100644
59544--- a/fs/proc/nommu.c
59545+++ b/fs/proc/nommu.c
59546@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
59547 if (len < 1)
59548 len = 1;
59549 seq_printf(m, "%*c", len, ' ');
59550- seq_path(m, &file->f_path, "");
59551+ seq_path(m, &file->f_path, "\n\\");
59552 }
59553
59554 seq_putc(m, '\n');
59555diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
59556index 986e832..6e8e859 100644
59557--- a/fs/proc/proc_net.c
59558+++ b/fs/proc/proc_net.c
59559@@ -23,6 +23,7 @@
59560 #include <linux/nsproxy.h>
59561 #include <net/net_namespace.h>
59562 #include <linux/seq_file.h>
59563+#include <linux/grsecurity.h>
59564
59565 #include "internal.h"
59566
59567@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
59568 struct task_struct *task;
59569 struct nsproxy *ns;
59570 struct net *net = NULL;
59571+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59572+ const struct cred *cred = current_cred();
59573+#endif
59574+
59575+#ifdef CONFIG_GRKERNSEC_PROC_USER
59576+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
59577+ return net;
59578+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59579+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
59580+ return net;
59581+#endif
59582
59583 rcu_read_lock();
59584 task = pid_task(proc_pid(dir), PIDTYPE_PID);
59585diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
59586index ac05f33..1e6dc7e 100644
59587--- a/fs/proc/proc_sysctl.c
59588+++ b/fs/proc/proc_sysctl.c
59589@@ -13,11 +13,15 @@
59590 #include <linux/module.h>
59591 #include "internal.h"
59592
59593+extern int gr_handle_chroot_sysctl(const int op);
59594+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59595+ const int op);
59596+
59597 static const struct dentry_operations proc_sys_dentry_operations;
59598 static const struct file_operations proc_sys_file_operations;
59599-static const struct inode_operations proc_sys_inode_operations;
59600+const struct inode_operations proc_sys_inode_operations;
59601 static const struct file_operations proc_sys_dir_file_operations;
59602-static const struct inode_operations proc_sys_dir_operations;
59603+const struct inode_operations proc_sys_dir_operations;
59604
59605 void proc_sys_poll_notify(struct ctl_table_poll *poll)
59606 {
59607@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
59608
59609 err = NULL;
59610 d_set_d_op(dentry, &proc_sys_dentry_operations);
59611+
59612+ gr_handle_proc_create(dentry, inode);
59613+
59614 d_add(dentry, inode);
59615
59616 out:
59617@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59618 struct inode *inode = file_inode(filp);
59619 struct ctl_table_header *head = grab_header(inode);
59620 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
59621+ int op = write ? MAY_WRITE : MAY_READ;
59622 ssize_t error;
59623 size_t res;
59624
59625@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59626 * and won't be until we finish.
59627 */
59628 error = -EPERM;
59629- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
59630+ if (sysctl_perm(head, table, op))
59631 goto out;
59632
59633 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
59634@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59635 if (!table->proc_handler)
59636 goto out;
59637
59638+#ifdef CONFIG_GRKERNSEC
59639+ error = -EPERM;
59640+ if (gr_handle_chroot_sysctl(op))
59641+ goto out;
59642+ dget(filp->f_path.dentry);
59643+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
59644+ dput(filp->f_path.dentry);
59645+ goto out;
59646+ }
59647+ dput(filp->f_path.dentry);
59648+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
59649+ goto out;
59650+ if (write && !capable(CAP_SYS_ADMIN))
59651+ goto out;
59652+#endif
59653+
59654 /* careful: calling conventions are nasty here */
59655 res = count;
59656 error = table->proc_handler(table, write, buf, &res, ppos);
59657@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
59658 return -ENOMEM;
59659 } else {
59660 d_set_d_op(child, &proc_sys_dentry_operations);
59661+
59662+ gr_handle_proc_create(child, inode);
59663+
59664 d_add(child, inode);
59665 }
59666 } else {
59667@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
59668 if ((*pos)++ < file->f_pos)
59669 return 0;
59670
59671+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
59672+ return 0;
59673+
59674 if (unlikely(S_ISLNK(table->mode)))
59675 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
59676 else
59677@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
59678 if (IS_ERR(head))
59679 return PTR_ERR(head);
59680
59681+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
59682+ return -ENOENT;
59683+
59684 generic_fillattr(inode, stat);
59685 if (table)
59686 stat->mode = (stat->mode & S_IFMT) | table->mode;
59687@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
59688 .llseek = generic_file_llseek,
59689 };
59690
59691-static const struct inode_operations proc_sys_inode_operations = {
59692+const struct inode_operations proc_sys_inode_operations = {
59693 .permission = proc_sys_permission,
59694 .setattr = proc_sys_setattr,
59695 .getattr = proc_sys_getattr,
59696 };
59697
59698-static const struct inode_operations proc_sys_dir_operations = {
59699+const struct inode_operations proc_sys_dir_operations = {
59700 .lookup = proc_sys_lookup,
59701 .permission = proc_sys_permission,
59702 .setattr = proc_sys_setattr,
59703@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
59704 static struct ctl_dir *new_dir(struct ctl_table_set *set,
59705 const char *name, int namelen)
59706 {
59707- struct ctl_table *table;
59708+ ctl_table_no_const *table;
59709 struct ctl_dir *new;
59710 struct ctl_node *node;
59711 char *new_name;
59712@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
59713 return NULL;
59714
59715 node = (struct ctl_node *)(new + 1);
59716- table = (struct ctl_table *)(node + 1);
59717+ table = (ctl_table_no_const *)(node + 1);
59718 new_name = (char *)(table + 2);
59719 memcpy(new_name, name, namelen);
59720 new_name[namelen] = '\0';
59721@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
59722 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
59723 struct ctl_table_root *link_root)
59724 {
59725- struct ctl_table *link_table, *entry, *link;
59726+ ctl_table_no_const *link_table, *link;
59727+ struct ctl_table *entry;
59728 struct ctl_table_header *links;
59729 struct ctl_node *node;
59730 char *link_name;
59731@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
59732 return NULL;
59733
59734 node = (struct ctl_node *)(links + 1);
59735- link_table = (struct ctl_table *)(node + nr_entries);
59736+ link_table = (ctl_table_no_const *)(node + nr_entries);
59737 link_name = (char *)&link_table[nr_entries + 1];
59738
59739 for (link = link_table, entry = table; entry->procname; link++, entry++) {
59740@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59741 struct ctl_table_header ***subheader, struct ctl_table_set *set,
59742 struct ctl_table *table)
59743 {
59744- struct ctl_table *ctl_table_arg = NULL;
59745- struct ctl_table *entry, *files;
59746+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
59747+ struct ctl_table *entry;
59748 int nr_files = 0;
59749 int nr_dirs = 0;
59750 int err = -ENOMEM;
59751@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59752 nr_files++;
59753 }
59754
59755- files = table;
59756 /* If there are mixed files and directories we need a new table */
59757 if (nr_dirs && nr_files) {
59758- struct ctl_table *new;
59759+ ctl_table_no_const *new;
59760 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
59761 GFP_KERNEL);
59762 if (!files)
59763@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59764 /* Register everything except a directory full of subdirectories */
59765 if (nr_files || !nr_dirs) {
59766 struct ctl_table_header *header;
59767- header = __register_sysctl_table(set, path, files);
59768+ header = __register_sysctl_table(set, path, files ? files : table);
59769 if (!header) {
59770 kfree(ctl_table_arg);
59771 goto out;
59772diff --git a/fs/proc/root.c b/fs/proc/root.c
59773index 41a6ea9..23eaa92 100644
59774--- a/fs/proc/root.c
59775+++ b/fs/proc/root.c
59776@@ -182,7 +182,15 @@ void __init proc_root_init(void)
59777 #ifdef CONFIG_PROC_DEVICETREE
59778 proc_device_tree_init();
59779 #endif
59780+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59781+#ifdef CONFIG_GRKERNSEC_PROC_USER
59782+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
59783+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59784+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
59785+#endif
59786+#else
59787 proc_mkdir("bus", NULL);
59788+#endif
59789 proc_sys_init();
59790 }
59791
59792diff --git a/fs/proc/self.c b/fs/proc/self.c
59793index 6b6a993..807cccc 100644
59794--- a/fs/proc/self.c
59795+++ b/fs/proc/self.c
59796@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
59797 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
59798 void *cookie)
59799 {
59800- char *s = nd_get_link(nd);
59801+ const char *s = nd_get_link(nd);
59802 if (!IS_ERR(s))
59803 kfree(s);
59804 }
59805diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
59806index 65fc60a..350cc48 100644
59807--- a/fs/proc/task_mmu.c
59808+++ b/fs/proc/task_mmu.c
59809@@ -11,12 +11,19 @@
59810 #include <linux/rmap.h>
59811 #include <linux/swap.h>
59812 #include <linux/swapops.h>
59813+#include <linux/grsecurity.h>
59814
59815 #include <asm/elf.h>
59816 #include <asm/uaccess.h>
59817 #include <asm/tlbflush.h>
59818 #include "internal.h"
59819
59820+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59821+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
59822+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
59823+ _mm->pax_flags & MF_PAX_SEGMEXEC))
59824+#endif
59825+
59826 void task_mem(struct seq_file *m, struct mm_struct *mm)
59827 {
59828 unsigned long data, text, lib, swap;
59829@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
59830 "VmExe:\t%8lu kB\n"
59831 "VmLib:\t%8lu kB\n"
59832 "VmPTE:\t%8lu kB\n"
59833- "VmSwap:\t%8lu kB\n",
59834- hiwater_vm << (PAGE_SHIFT-10),
59835+ "VmSwap:\t%8lu kB\n"
59836+
59837+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59838+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
59839+#endif
59840+
59841+ ,hiwater_vm << (PAGE_SHIFT-10),
59842 total_vm << (PAGE_SHIFT-10),
59843 mm->locked_vm << (PAGE_SHIFT-10),
59844 mm->pinned_vm << (PAGE_SHIFT-10),
59845@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
59846 data << (PAGE_SHIFT-10),
59847 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
59848 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
59849- swap << (PAGE_SHIFT-10));
59850+ swap << (PAGE_SHIFT-10)
59851+
59852+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59853+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59854+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
59855+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
59856+#else
59857+ , mm->context.user_cs_base
59858+ , mm->context.user_cs_limit
59859+#endif
59860+#endif
59861+
59862+ );
59863 }
59864
59865 unsigned long task_vsize(struct mm_struct *mm)
59866@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
59867 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
59868 }
59869
59870- /* We don't show the stack guard page in /proc/maps */
59871+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59872+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
59873+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
59874+#else
59875 start = vma->vm_start;
59876- if (stack_guard_page_start(vma, start))
59877- start += PAGE_SIZE;
59878 end = vma->vm_end;
59879- if (stack_guard_page_end(vma, end))
59880- end -= PAGE_SIZE;
59881+#endif
59882
59883 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
59884 start,
59885@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
59886 flags & VM_WRITE ? 'w' : '-',
59887 flags & VM_EXEC ? 'x' : '-',
59888 flags & VM_MAYSHARE ? 's' : 'p',
59889+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59890+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
59891+#else
59892 pgoff,
59893+#endif
59894 MAJOR(dev), MINOR(dev), ino, &len);
59895
59896 /*
59897@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
59898 */
59899 if (file) {
59900 pad_len_spaces(m, len);
59901- seq_path(m, &file->f_path, "\n");
59902+ seq_path(m, &file->f_path, "\n\\");
59903 goto done;
59904 }
59905
59906@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
59907 * Thread stack in /proc/PID/task/TID/maps or
59908 * the main process stack.
59909 */
59910- if (!is_pid || (vma->vm_start <= mm->start_stack &&
59911- vma->vm_end >= mm->start_stack)) {
59912+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
59913+ (vma->vm_start <= mm->start_stack &&
59914+ vma->vm_end >= mm->start_stack)) {
59915 name = "[stack]";
59916 } else {
59917 /* Thread stack in /proc/PID/maps */
59918@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
59919 struct proc_maps_private *priv = m->private;
59920 struct task_struct *task = priv->task;
59921
59922+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59923+ if (current->exec_id != m->exec_id) {
59924+ gr_log_badprocpid("maps");
59925+ return 0;
59926+ }
59927+#endif
59928+
59929 show_map_vma(m, vma, is_pid);
59930
59931 if (m->count < m->size) /* vma is copied successfully */
59932@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
59933 .private = &mss,
59934 };
59935
59936+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59937+ if (current->exec_id != m->exec_id) {
59938+ gr_log_badprocpid("smaps");
59939+ return 0;
59940+ }
59941+#endif
59942 memset(&mss, 0, sizeof mss);
59943- mss.vma = vma;
59944- /* mmap_sem is held in m_start */
59945- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
59946- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
59947-
59948+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59949+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
59950+#endif
59951+ mss.vma = vma;
59952+ /* mmap_sem is held in m_start */
59953+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
59954+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
59955+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59956+ }
59957+#endif
59958 show_map_vma(m, vma, is_pid);
59959
59960 seq_printf(m,
59961@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
59962 "KernelPageSize: %8lu kB\n"
59963 "MMUPageSize: %8lu kB\n"
59964 "Locked: %8lu kB\n",
59965+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59966+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
59967+#else
59968 (vma->vm_end - vma->vm_start) >> 10,
59969+#endif
59970 mss.resident >> 10,
59971 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
59972 mss.shared_clean >> 10,
59973@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
59974 int n;
59975 char buffer[50];
59976
59977+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59978+ if (current->exec_id != m->exec_id) {
59979+ gr_log_badprocpid("numa_maps");
59980+ return 0;
59981+ }
59982+#endif
59983+
59984 if (!mm)
59985 return 0;
59986
59987@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
59988 mpol_to_str(buffer, sizeof(buffer), pol);
59989 mpol_cond_put(pol);
59990
59991+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59992+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
59993+#else
59994 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
59995+#endif
59996
59997 if (file) {
59998 seq_printf(m, " file=");
59999- seq_path(m, &file->f_path, "\n\t= ");
60000+ seq_path(m, &file->f_path, "\n\t\\= ");
60001 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
60002 seq_printf(m, " heap");
60003 } else {
60004diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
60005index 56123a6..5a2f6ec 100644
60006--- a/fs/proc/task_nommu.c
60007+++ b/fs/proc/task_nommu.c
60008@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
60009 else
60010 bytes += kobjsize(mm);
60011
60012- if (current->fs && current->fs->users > 1)
60013+ if (current->fs && atomic_read(&current->fs->users) > 1)
60014 sbytes += kobjsize(current->fs);
60015 else
60016 bytes += kobjsize(current->fs);
60017@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
60018
60019 if (file) {
60020 pad_len_spaces(m, len);
60021- seq_path(m, &file->f_path, "");
60022+ seq_path(m, &file->f_path, "\n\\");
60023 } else if (mm) {
60024 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
60025
60026diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
60027index 17f7e08..e4b1529 100644
60028--- a/fs/proc/vmcore.c
60029+++ b/fs/proc/vmcore.c
60030@@ -99,9 +99,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
60031 nr_bytes = count;
60032
60033 /* If pfn is not ram, return zeros for sparse dump files */
60034- if (pfn_is_ram(pfn) == 0)
60035- memset(buf, 0, nr_bytes);
60036- else {
60037+ if (pfn_is_ram(pfn) == 0) {
60038+ if (userbuf) {
60039+ if (clear_user((char __force_user *)buf, nr_bytes))
60040+ return -EFAULT;
60041+ } else
60042+ memset(buf, 0, nr_bytes);
60043+ } else {
60044 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
60045 offset, userbuf);
60046 if (tmp < 0)
60047@@ -186,7 +190,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
60048 if (tsz > nr_bytes)
60049 tsz = nr_bytes;
60050
60051- tmp = read_from_oldmem(buffer, tsz, &start, 1);
60052+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
60053 if (tmp < 0)
60054 return tmp;
60055 buflen -= tsz;
60056diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
60057index b00fcc9..e0c6381 100644
60058--- a/fs/qnx6/qnx6.h
60059+++ b/fs/qnx6/qnx6.h
60060@@ -74,7 +74,7 @@ enum {
60061 BYTESEX_BE,
60062 };
60063
60064-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
60065+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
60066 {
60067 if (sbi->s_bytesex == BYTESEX_LE)
60068 return le64_to_cpu((__force __le64)n);
60069@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
60070 return (__force __fs64)cpu_to_be64(n);
60071 }
60072
60073-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
60074+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
60075 {
60076 if (sbi->s_bytesex == BYTESEX_LE)
60077 return le32_to_cpu((__force __le32)n);
60078diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
60079index 16e8abb..2dcf914 100644
60080--- a/fs/quota/netlink.c
60081+++ b/fs/quota/netlink.c
60082@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
60083 void quota_send_warning(struct kqid qid, dev_t dev,
60084 const char warntype)
60085 {
60086- static atomic_t seq;
60087+ static atomic_unchecked_t seq;
60088 struct sk_buff *skb;
60089 void *msg_head;
60090 int ret;
60091@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
60092 "VFS: Not enough memory to send quota warning.\n");
60093 return;
60094 }
60095- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
60096+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
60097 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
60098 if (!msg_head) {
60099 printk(KERN_ERR
60100diff --git a/fs/read_write.c b/fs/read_write.c
60101index 2cefa41..c7e2fe0 100644
60102--- a/fs/read_write.c
60103+++ b/fs/read_write.c
60104@@ -411,7 +411,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
60105
60106 old_fs = get_fs();
60107 set_fs(get_ds());
60108- p = (__force const char __user *)buf;
60109+ p = (const char __force_user *)buf;
60110 if (count > MAX_RW_COUNT)
60111 count = MAX_RW_COUNT;
60112 if (file->f_op->write)
60113diff --git a/fs/readdir.c b/fs/readdir.c
60114index fee38e0..12fdf47 100644
60115--- a/fs/readdir.c
60116+++ b/fs/readdir.c
60117@@ -17,6 +17,7 @@
60118 #include <linux/security.h>
60119 #include <linux/syscalls.h>
60120 #include <linux/unistd.h>
60121+#include <linux/namei.h>
60122
60123 #include <asm/uaccess.h>
60124
60125@@ -67,6 +68,7 @@ struct old_linux_dirent {
60126
60127 struct readdir_callback {
60128 struct old_linux_dirent __user * dirent;
60129+ struct file * file;
60130 int result;
60131 };
60132
60133@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
60134 buf->result = -EOVERFLOW;
60135 return -EOVERFLOW;
60136 }
60137+
60138+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60139+ return 0;
60140+
60141 buf->result++;
60142 dirent = buf->dirent;
60143 if (!access_ok(VERIFY_WRITE, dirent,
60144@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60145
60146 buf.result = 0;
60147 buf.dirent = dirent;
60148+ buf.file = f.file;
60149
60150 error = vfs_readdir(f.file, fillonedir, &buf);
60151 if (buf.result)
60152@@ -139,6 +146,7 @@ struct linux_dirent {
60153 struct getdents_callback {
60154 struct linux_dirent __user * current_dir;
60155 struct linux_dirent __user * previous;
60156+ struct file * file;
60157 int count;
60158 int error;
60159 };
60160@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
60161 buf->error = -EOVERFLOW;
60162 return -EOVERFLOW;
60163 }
60164+
60165+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60166+ return 0;
60167+
60168 dirent = buf->previous;
60169 if (dirent) {
60170 if (__put_user(offset, &dirent->d_off))
60171@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
60172 buf.previous = NULL;
60173 buf.count = count;
60174 buf.error = 0;
60175+ buf.file = f.file;
60176
60177 error = vfs_readdir(f.file, filldir, &buf);
60178 if (error >= 0)
60179@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
60180 struct getdents_callback64 {
60181 struct linux_dirent64 __user * current_dir;
60182 struct linux_dirent64 __user * previous;
60183+ struct file *file;
60184 int count;
60185 int error;
60186 };
60187@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
60188 buf->error = -EINVAL; /* only used if we fail.. */
60189 if (reclen > buf->count)
60190 return -EINVAL;
60191+
60192+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60193+ return 0;
60194+
60195 dirent = buf->previous;
60196 if (dirent) {
60197 if (__put_user(offset, &dirent->d_off))
60198@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60199
60200 buf.current_dir = dirent;
60201 buf.previous = NULL;
60202+ buf.file = f.file;
60203 buf.count = count;
60204 buf.error = 0;
60205
60206@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60207 error = buf.error;
60208 lastdirent = buf.previous;
60209 if (lastdirent) {
60210- typeof(lastdirent->d_off) d_off = f.file->f_pos;
60211+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
60212 if (__put_user(d_off, &lastdirent->d_off))
60213 error = -EFAULT;
60214 else
60215diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
60216index 2b7882b..1c5ef48 100644
60217--- a/fs/reiserfs/do_balan.c
60218+++ b/fs/reiserfs/do_balan.c
60219@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
60220 return;
60221 }
60222
60223- atomic_inc(&(fs_generation(tb->tb_sb)));
60224+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
60225 do_balance_starts(tb);
60226
60227 /* balance leaf returns 0 except if combining L R and S into
60228diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
60229index 1d48974..2f8f4e0 100644
60230--- a/fs/reiserfs/procfs.c
60231+++ b/fs/reiserfs/procfs.c
60232@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
60233 "SMALL_TAILS " : "NO_TAILS ",
60234 replay_only(sb) ? "REPLAY_ONLY " : "",
60235 convert_reiserfs(sb) ? "CONV " : "",
60236- atomic_read(&r->s_generation_counter),
60237+ atomic_read_unchecked(&r->s_generation_counter),
60238 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
60239 SF(s_do_balance), SF(s_unneeded_left_neighbor),
60240 SF(s_good_search_by_key_reada), SF(s_bmaps),
60241diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
60242index 157e474..65a6114 100644
60243--- a/fs/reiserfs/reiserfs.h
60244+++ b/fs/reiserfs/reiserfs.h
60245@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
60246 /* Comment? -Hans */
60247 wait_queue_head_t s_wait;
60248 /* To be obsoleted soon by per buffer seals.. -Hans */
60249- atomic_t s_generation_counter; // increased by one every time the
60250+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60251 // tree gets re-balanced
60252 unsigned long s_properties; /* File system properties. Currently holds
60253 on-disk FS format */
60254@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60255 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60256
60257 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60258-#define get_generation(s) atomic_read (&fs_generation(s))
60259+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60260 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60261 #define __fs_changed(gen,s) (gen != get_generation (s))
60262 #define fs_changed(gen,s) \
60263diff --git a/fs/select.c b/fs/select.c
60264index 8c1c96c..a0f9b6d 100644
60265--- a/fs/select.c
60266+++ b/fs/select.c
60267@@ -20,6 +20,7 @@
60268 #include <linux/export.h>
60269 #include <linux/slab.h>
60270 #include <linux/poll.h>
60271+#include <linux/security.h>
60272 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
60273 #include <linux/file.h>
60274 #include <linux/fdtable.h>
60275@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
60276 struct poll_list *walk = head;
60277 unsigned long todo = nfds;
60278
60279+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
60280 if (nfds > rlimit(RLIMIT_NOFILE))
60281 return -EINVAL;
60282
60283diff --git a/fs/seq_file.c b/fs/seq_file.c
60284index 774c1eb..b67582a 100644
60285--- a/fs/seq_file.c
60286+++ b/fs/seq_file.c
60287@@ -10,6 +10,7 @@
60288 #include <linux/seq_file.h>
60289 #include <linux/slab.h>
60290 #include <linux/cred.h>
60291+#include <linux/sched.h>
60292
60293 #include <asm/uaccess.h>
60294 #include <asm/page.h>
60295@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
60296 #ifdef CONFIG_USER_NS
60297 p->user_ns = file->f_cred->user_ns;
60298 #endif
60299+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60300+ p->exec_id = current->exec_id;
60301+#endif
60302
60303 /*
60304 * Wrappers around seq_open(e.g. swaps_open) need to be
60305@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
60306 return 0;
60307 }
60308 if (!m->buf) {
60309- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
60310+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
60311 if (!m->buf)
60312 return -ENOMEM;
60313 }
60314@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
60315 Eoverflow:
60316 m->op->stop(m, p);
60317 kfree(m->buf);
60318- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
60319+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
60320 return !m->buf ? -ENOMEM : -EAGAIN;
60321 }
60322
60323@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
60324
60325 /* grab buffer if we didn't have one */
60326 if (!m->buf) {
60327- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
60328+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
60329 if (!m->buf)
60330 goto Enomem;
60331 }
60332@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
60333 goto Fill;
60334 m->op->stop(m, p);
60335 kfree(m->buf);
60336- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
60337+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
60338 if (!m->buf)
60339 goto Enomem;
60340 m->count = 0;
60341@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
60342 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
60343 void *data)
60344 {
60345- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
60346+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
60347 int res = -ENOMEM;
60348
60349 if (op) {
60350diff --git a/fs/splice.c b/fs/splice.c
60351index d37431d..81c3044 100644
60352--- a/fs/splice.c
60353+++ b/fs/splice.c
60354@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60355 pipe_lock(pipe);
60356
60357 for (;;) {
60358- if (!pipe->readers) {
60359+ if (!atomic_read(&pipe->readers)) {
60360 send_sig(SIGPIPE, current, 0);
60361 if (!ret)
60362 ret = -EPIPE;
60363@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60364 page_nr++;
60365 ret += buf->len;
60366
60367- if (pipe->files)
60368+ if (atomic_read(&pipe->files))
60369 do_wakeup = 1;
60370
60371 if (!--spd->nr_pages)
60372@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60373 do_wakeup = 0;
60374 }
60375
60376- pipe->waiting_writers++;
60377+ atomic_inc(&pipe->waiting_writers);
60378 pipe_wait(pipe);
60379- pipe->waiting_writers--;
60380+ atomic_dec(&pipe->waiting_writers);
60381 }
60382
60383 pipe_unlock(pipe);
60384@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
60385 old_fs = get_fs();
60386 set_fs(get_ds());
60387 /* The cast to a user pointer is valid due to the set_fs() */
60388- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
60389+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
60390 set_fs(old_fs);
60391
60392 return res;
60393@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
60394 old_fs = get_fs();
60395 set_fs(get_ds());
60396 /* The cast to a user pointer is valid due to the set_fs() */
60397- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
60398+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
60399 set_fs(old_fs);
60400
60401 return res;
60402@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
60403 goto err;
60404
60405 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
60406- vec[i].iov_base = (void __user *) page_address(page);
60407+ vec[i].iov_base = (void __force_user *) page_address(page);
60408 vec[i].iov_len = this_len;
60409 spd.pages[i] = page;
60410 spd.nr_pages++;
60411@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
60412 ops->release(pipe, buf);
60413 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
60414 pipe->nrbufs--;
60415- if (pipe->files)
60416+ if (atomic_read(&pipe->files))
60417 sd->need_wakeup = true;
60418 }
60419
60420@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
60421 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
60422 {
60423 while (!pipe->nrbufs) {
60424- if (!pipe->writers)
60425+ if (!atomic_read(&pipe->writers))
60426 return 0;
60427
60428- if (!pipe->waiting_writers && sd->num_spliced)
60429+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
60430 return 0;
60431
60432 if (sd->flags & SPLICE_F_NONBLOCK)
60433@@ -1193,7 +1193,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
60434 * out of the pipe right after the splice_to_pipe(). So set
60435 * PIPE_READERS appropriately.
60436 */
60437- pipe->readers = 1;
60438+ atomic_set(&pipe->readers, 1);
60439
60440 current->splice_pipe = pipe;
60441 }
60442@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60443 ret = -ERESTARTSYS;
60444 break;
60445 }
60446- if (!pipe->writers)
60447+ if (!atomic_read(&pipe->writers))
60448 break;
60449- if (!pipe->waiting_writers) {
60450+ if (!atomic_read(&pipe->waiting_writers)) {
60451 if (flags & SPLICE_F_NONBLOCK) {
60452 ret = -EAGAIN;
60453 break;
60454@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60455 pipe_lock(pipe);
60456
60457 while (pipe->nrbufs >= pipe->buffers) {
60458- if (!pipe->readers) {
60459+ if (!atomic_read(&pipe->readers)) {
60460 send_sig(SIGPIPE, current, 0);
60461 ret = -EPIPE;
60462 break;
60463@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60464 ret = -ERESTARTSYS;
60465 break;
60466 }
60467- pipe->waiting_writers++;
60468+ atomic_inc(&pipe->waiting_writers);
60469 pipe_wait(pipe);
60470- pipe->waiting_writers--;
60471+ atomic_dec(&pipe->waiting_writers);
60472 }
60473
60474 pipe_unlock(pipe);
60475@@ -1854,14 +1854,14 @@ retry:
60476 pipe_double_lock(ipipe, opipe);
60477
60478 do {
60479- if (!opipe->readers) {
60480+ if (!atomic_read(&opipe->readers)) {
60481 send_sig(SIGPIPE, current, 0);
60482 if (!ret)
60483 ret = -EPIPE;
60484 break;
60485 }
60486
60487- if (!ipipe->nrbufs && !ipipe->writers)
60488+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
60489 break;
60490
60491 /*
60492@@ -1958,7 +1958,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
60493 pipe_double_lock(ipipe, opipe);
60494
60495 do {
60496- if (!opipe->readers) {
60497+ if (!atomic_read(&opipe->readers)) {
60498 send_sig(SIGPIPE, current, 0);
60499 if (!ret)
60500 ret = -EPIPE;
60501@@ -2003,7 +2003,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
60502 * return EAGAIN if we have the potential of some data in the
60503 * future, otherwise just return 0
60504 */
60505- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
60506+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
60507 ret = -EAGAIN;
60508
60509 pipe_unlock(ipipe);
60510diff --git a/fs/stat.c b/fs/stat.c
60511index 04ce1ac..a13dd1e 100644
60512--- a/fs/stat.c
60513+++ b/fs/stat.c
60514@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
60515 stat->gid = inode->i_gid;
60516 stat->rdev = inode->i_rdev;
60517 stat->size = i_size_read(inode);
60518- stat->atime = inode->i_atime;
60519- stat->mtime = inode->i_mtime;
60520+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
60521+ stat->atime = inode->i_ctime;
60522+ stat->mtime = inode->i_ctime;
60523+ } else {
60524+ stat->atime = inode->i_atime;
60525+ stat->mtime = inode->i_mtime;
60526+ }
60527 stat->ctime = inode->i_ctime;
60528 stat->blksize = (1 << inode->i_blkbits);
60529 stat->blocks = inode->i_blocks;
60530@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
60531 if (retval)
60532 return retval;
60533
60534- if (inode->i_op->getattr)
60535- return inode->i_op->getattr(path->mnt, path->dentry, stat);
60536+ if (inode->i_op->getattr) {
60537+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
60538+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
60539+ stat->atime = stat->ctime;
60540+ stat->mtime = stat->ctime;
60541+ }
60542+ return retval;
60543+ }
60544
60545 generic_fillattr(inode, stat);
60546 return 0;
60547diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
60548index 15c68f9..36a8b3e 100644
60549--- a/fs/sysfs/bin.c
60550+++ b/fs/sysfs/bin.c
60551@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
60552 return ret;
60553 }
60554
60555-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
60556- void *buf, int len, int write)
60557+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
60558+ void *buf, size_t len, int write)
60559 {
60560 struct file *file = vma->vm_file;
60561 struct bin_buffer *bb = file->private_data;
60562 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
60563- int ret;
60564+ ssize_t ret;
60565
60566 if (!bb->vm_ops)
60567 return -EINVAL;
60568diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
60569index e8e0e71..79c28ac5 100644
60570--- a/fs/sysfs/dir.c
60571+++ b/fs/sysfs/dir.c
60572@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
60573 *
60574 * Returns 31 bit hash of ns + name (so it fits in an off_t )
60575 */
60576-static unsigned int sysfs_name_hash(const void *ns, const char *name)
60577+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
60578 {
60579 unsigned long hash = init_name_hash();
60580 unsigned int len = strlen(name);
60581@@ -679,6 +679,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
60582 struct sysfs_dirent *sd;
60583 int rc;
60584
60585+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60586+ const char *parent_name = parent_sd->s_name;
60587+
60588+ mode = S_IFDIR | S_IRWXU;
60589+
60590+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
60591+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
60592+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
60593+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
60594+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60595+#endif
60596+
60597 /* allocate */
60598 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
60599 if (!sd)
60600diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
60601index 602f56d..6853db8 100644
60602--- a/fs/sysfs/file.c
60603+++ b/fs/sysfs/file.c
60604@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
60605
60606 struct sysfs_open_dirent {
60607 atomic_t refcnt;
60608- atomic_t event;
60609+ atomic_unchecked_t event;
60610 wait_queue_head_t poll;
60611 struct list_head buffers; /* goes through sysfs_buffer.list */
60612 };
60613@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
60614 if (!sysfs_get_active(attr_sd))
60615 return -ENODEV;
60616
60617- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
60618+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
60619 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
60620
60621 sysfs_put_active(attr_sd);
60622@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
60623 return -ENOMEM;
60624
60625 atomic_set(&new_od->refcnt, 0);
60626- atomic_set(&new_od->event, 1);
60627+ atomic_set_unchecked(&new_od->event, 1);
60628 init_waitqueue_head(&new_od->poll);
60629 INIT_LIST_HEAD(&new_od->buffers);
60630 goto retry;
60631@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
60632
60633 sysfs_put_active(attr_sd);
60634
60635- if (buffer->event != atomic_read(&od->event))
60636+ if (buffer->event != atomic_read_unchecked(&od->event))
60637 goto trigger;
60638
60639 return DEFAULT_POLLMASK;
60640@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
60641
60642 od = sd->s_attr.open;
60643 if (od) {
60644- atomic_inc(&od->event);
60645+ atomic_inc_unchecked(&od->event);
60646 wake_up_interruptible(&od->poll);
60647 }
60648
60649diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
60650index 8c940df..25b733e 100644
60651--- a/fs/sysfs/symlink.c
60652+++ b/fs/sysfs/symlink.c
60653@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
60654
60655 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
60656 {
60657- char *page = nd_get_link(nd);
60658+ const char *page = nd_get_link(nd);
60659 if (!IS_ERR(page))
60660 free_page((unsigned long)page);
60661 }
60662diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
60663index 69d4889..a810bd4 100644
60664--- a/fs/sysv/sysv.h
60665+++ b/fs/sysv/sysv.h
60666@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
60667 #endif
60668 }
60669
60670-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
60671+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
60672 {
60673 if (sbi->s_bytesex == BYTESEX_PDP)
60674 return PDP_swab((__force __u32)n);
60675diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
60676index e18b988..f1d4ad0f 100644
60677--- a/fs/ubifs/io.c
60678+++ b/fs/ubifs/io.c
60679@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
60680 return err;
60681 }
60682
60683-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
60684+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
60685 {
60686 int err;
60687
60688diff --git a/fs/udf/misc.c b/fs/udf/misc.c
60689index c175b4d..8f36a16 100644
60690--- a/fs/udf/misc.c
60691+++ b/fs/udf/misc.c
60692@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
60693
60694 u8 udf_tag_checksum(const struct tag *t)
60695 {
60696- u8 *data = (u8 *)t;
60697+ const u8 *data = (const u8 *)t;
60698 u8 checksum = 0;
60699 int i;
60700 for (i = 0; i < sizeof(struct tag); ++i)
60701diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
60702index 8d974c4..b82f6ec 100644
60703--- a/fs/ufs/swab.h
60704+++ b/fs/ufs/swab.h
60705@@ -22,7 +22,7 @@ enum {
60706 BYTESEX_BE
60707 };
60708
60709-static inline u64
60710+static inline u64 __intentional_overflow(-1)
60711 fs64_to_cpu(struct super_block *sbp, __fs64 n)
60712 {
60713 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
60714@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
60715 return (__force __fs64)cpu_to_be64(n);
60716 }
60717
60718-static inline u32
60719+static inline u32 __intentional_overflow(-1)
60720 fs32_to_cpu(struct super_block *sbp, __fs32 n)
60721 {
60722 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
60723diff --git a/fs/utimes.c b/fs/utimes.c
60724index f4fb7ec..3fe03c0 100644
60725--- a/fs/utimes.c
60726+++ b/fs/utimes.c
60727@@ -1,6 +1,7 @@
60728 #include <linux/compiler.h>
60729 #include <linux/file.h>
60730 #include <linux/fs.h>
60731+#include <linux/security.h>
60732 #include <linux/linkage.h>
60733 #include <linux/mount.h>
60734 #include <linux/namei.h>
60735@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
60736 goto mnt_drop_write_and_out;
60737 }
60738 }
60739+
60740+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
60741+ error = -EACCES;
60742+ goto mnt_drop_write_and_out;
60743+ }
60744+
60745 mutex_lock(&inode->i_mutex);
60746 error = notify_change(path->dentry, &newattrs);
60747 mutex_unlock(&inode->i_mutex);
60748diff --git a/fs/xattr.c b/fs/xattr.c
60749index 3377dff..4d074d9 100644
60750--- a/fs/xattr.c
60751+++ b/fs/xattr.c
60752@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
60753 return rc;
60754 }
60755
60756+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60757+ssize_t
60758+pax_getxattr(struct dentry *dentry, void *value, size_t size)
60759+{
60760+ struct inode *inode = dentry->d_inode;
60761+ ssize_t error;
60762+
60763+ error = inode_permission(inode, MAY_EXEC);
60764+ if (error)
60765+ return error;
60766+
60767+ if (inode->i_op->getxattr)
60768+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
60769+ else
60770+ error = -EOPNOTSUPP;
60771+
60772+ return error;
60773+}
60774+EXPORT_SYMBOL(pax_getxattr);
60775+#endif
60776+
60777 ssize_t
60778 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
60779 {
60780@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
60781 * Extended attribute SET operations
60782 */
60783 static long
60784-setxattr(struct dentry *d, const char __user *name, const void __user *value,
60785+setxattr(struct path *path, const char __user *name, const void __user *value,
60786 size_t size, int flags)
60787 {
60788 int error;
60789@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
60790 posix_acl_fix_xattr_from_user(kvalue, size);
60791 }
60792
60793- error = vfs_setxattr(d, kname, kvalue, size, flags);
60794+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
60795+ error = -EACCES;
60796+ goto out;
60797+ }
60798+
60799+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
60800 out:
60801 if (vvalue)
60802 vfree(vvalue);
60803@@ -377,7 +403,7 @@ retry:
60804 return error;
60805 error = mnt_want_write(path.mnt);
60806 if (!error) {
60807- error = setxattr(path.dentry, name, value, size, flags);
60808+ error = setxattr(&path, name, value, size, flags);
60809 mnt_drop_write(path.mnt);
60810 }
60811 path_put(&path);
60812@@ -401,7 +427,7 @@ retry:
60813 return error;
60814 error = mnt_want_write(path.mnt);
60815 if (!error) {
60816- error = setxattr(path.dentry, name, value, size, flags);
60817+ error = setxattr(&path, name, value, size, flags);
60818 mnt_drop_write(path.mnt);
60819 }
60820 path_put(&path);
60821@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
60822 const void __user *,value, size_t, size, int, flags)
60823 {
60824 struct fd f = fdget(fd);
60825- struct dentry *dentry;
60826 int error = -EBADF;
60827
60828 if (!f.file)
60829 return error;
60830- dentry = f.file->f_path.dentry;
60831- audit_inode(NULL, dentry, 0);
60832+ audit_inode(NULL, f.file->f_path.dentry, 0);
60833 error = mnt_want_write_file(f.file);
60834 if (!error) {
60835- error = setxattr(dentry, name, value, size, flags);
60836+ error = setxattr(&f.file->f_path, name, value, size, flags);
60837 mnt_drop_write_file(f.file);
60838 }
60839 fdput(f);
60840diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
60841index 9fbea87..6b19972 100644
60842--- a/fs/xattr_acl.c
60843+++ b/fs/xattr_acl.c
60844@@ -76,8 +76,8 @@ struct posix_acl *
60845 posix_acl_from_xattr(struct user_namespace *user_ns,
60846 const void *value, size_t size)
60847 {
60848- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
60849- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
60850+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
60851+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
60852 int count;
60853 struct posix_acl *acl;
60854 struct posix_acl_entry *acl_e;
60855diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
60856index 8904284..ee0e14b 100644
60857--- a/fs/xfs/xfs_bmap.c
60858+++ b/fs/xfs/xfs_bmap.c
60859@@ -765,7 +765,7 @@ xfs_bmap_validate_ret(
60860
60861 #else
60862 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
60863-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
60864+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
60865 #endif /* DEBUG */
60866
60867 /*
60868diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
60869index 6157424..ac98f6d 100644
60870--- a/fs/xfs/xfs_dir2_sf.c
60871+++ b/fs/xfs/xfs_dir2_sf.c
60872@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
60873 }
60874
60875 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
60876- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
60877+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
60878+ char name[sfep->namelen];
60879+ memcpy(name, sfep->name, sfep->namelen);
60880+ if (filldir(dirent, name, sfep->namelen,
60881+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
60882+ *offset = off & 0x7fffffff;
60883+ return 0;
60884+ }
60885+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
60886 off & 0x7fffffff, ino, DT_UNKNOWN)) {
60887 *offset = off & 0x7fffffff;
60888 return 0;
60889diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
60890index 5e99968..45bd327 100644
60891--- a/fs/xfs/xfs_ioctl.c
60892+++ b/fs/xfs/xfs_ioctl.c
60893@@ -127,7 +127,7 @@ xfs_find_handle(
60894 }
60895
60896 error = -EFAULT;
60897- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
60898+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
60899 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
60900 goto out_put;
60901
60902diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
60903index ca9ecaa..60100c7 100644
60904--- a/fs/xfs/xfs_iops.c
60905+++ b/fs/xfs/xfs_iops.c
60906@@ -395,7 +395,7 @@ xfs_vn_put_link(
60907 struct nameidata *nd,
60908 void *p)
60909 {
60910- char *s = nd_get_link(nd);
60911+ const char *s = nd_get_link(nd);
60912
60913 if (!IS_ERR(s))
60914 kfree(s);
60915diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
60916new file mode 100644
e2b79cd1 60917index 0000000..76e84b9
bb5f0bf8
AF
60918--- /dev/null
60919+++ b/grsecurity/Kconfig
e2b79cd1 60920@@ -0,0 +1,1063 @@
bb5f0bf8
AF
60921+#
 60922+# grsecurity configuration
60923+#
60924+menu "Memory Protections"
60925+depends on GRKERNSEC
60926+
60927+config GRKERNSEC_KMEM
60928+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
60929+ default y if GRKERNSEC_CONFIG_AUTO
60930+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
60931+ help
60932+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
60933+ be written to or read from to modify or leak the contents of the running
60934+ kernel. /dev/port will also not be allowed to be opened and support
60935+ for /dev/cpu/*/msr will be removed. If you have module
60936+ support disabled, enabling this will close up five ways that are
60937+ currently used to insert malicious code into the running kernel.
60938+
60939+ Even with all these features enabled, we still highly recommend that
60940+ you use the RBAC system, as it is still possible for an attacker to
60941+ modify the running kernel through privileged I/O granted by ioperm/iopl.
60942+
60943+ If you are not using XFree86, you may be able to stop this additional
60944+ case by enabling the 'Disable privileged I/O' option. Though nothing
60945+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
60946+ but only to video memory, which is the only writing we allow in this
60947+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
60948+ not be allowed to mprotect it with PROT_WRITE later.
60949+ Enabling this feature will prevent the "cpupower" and "powertop" tools
60950+ from working.
60951+
60952+ It is highly recommended that you say Y here if you meet all the
60953+ conditions above.
60954+
60955+config GRKERNSEC_VM86
60956+ bool "Restrict VM86 mode"
60957+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
60958+ depends on X86_32
60959+
60960+ help
60961+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
60962+ make use of a special execution mode on 32bit x86 processors called
60963+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
60964+ video cards and will still work with this option enabled. The purpose
60965+ of the option is to prevent exploitation of emulation errors in
60966+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
60967+ Nearly all users should be able to enable this option.
60968+
60969+config GRKERNSEC_IO
60970+ bool "Disable privileged I/O"
60971+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
60972+ depends on X86
60973+ select RTC_CLASS
60974+ select RTC_INTF_DEV
60975+ select RTC_DRV_CMOS
60976+
60977+ help
60978+ If you say Y here, all ioperm and iopl calls will return an error.
60979+ Ioperm and iopl can be used to modify the running kernel.
60980+ Unfortunately, some programs need this access to operate properly,
60981+ the most notable of which are XFree86 and hwclock. hwclock can be
60982+ remedied by having RTC support in the kernel, so real-time
60983+ clock support is enabled if this option is enabled, to ensure
60984+ that hwclock operates correctly. XFree86 still will not
60985+ operate correctly with this option enabled, so DO NOT CHOOSE Y
60986+ IF YOU USE XFree86. If you use XFree86 and you still want to
60987+ protect your kernel against modification, use the RBAC system.
60988+
60989+config GRKERNSEC_JIT_HARDEN
60990+ bool "Harden BPF JIT against spray attacks"
60991+ default y if GRKERNSEC_CONFIG_AUTO
60992+ depends on BPF_JIT
60993+ help
60994+ If you say Y here, the native code generated by the kernel's Berkeley
60995+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
60996+ attacks that attempt to fit attacker-beneficial instructions in
60997+ 32bit immediate fields of JIT-generated native instructions. The
60998+ attacker will generally aim to cause an unintended instruction sequence
60999+ of JIT-generated native code to execute by jumping into the middle of
61000+ a generated instruction. This feature effectively randomizes the 32bit
61001+ immediate constants present in the generated code to thwart such attacks.
61002+
61003+ If you're using KERNEXEC, it's recommended that you enable this option
61004+ to supplement the hardening of the kernel.
61005+
61006+config GRKERNSEC_PERF_HARDEN
61007+ bool "Disable unprivileged PERF_EVENTS usage by default"
61008+ default y if GRKERNSEC_CONFIG_AUTO
61009+ depends on PERF_EVENTS
61010+ help
61011+ If you say Y here, the range of acceptable values for the
61012+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
61013+ default to a new value: 3. When the sysctl is set to this value, no
61014+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
61015+
61016+ Though PERF_EVENTS can be used legitimately for performance monitoring
61017+ and low-level application profiling, it is forced on regardless of
61018+ configuration, has been at fault for several vulnerabilities, and
61019+ creates new opportunities for side channels and other information leaks.
61020+
61021+ This feature puts PERF_EVENTS into a secure default state and permits
61022+ the administrator to change out of it temporarily if unprivileged
61023+ application profiling is needed.
61024+
61025+config GRKERNSEC_RAND_THREADSTACK
61026+ bool "Insert random gaps between thread stacks"
61027+ default y if GRKERNSEC_CONFIG_AUTO
61028+ depends on PAX_RANDMMAP && !PPC
61029+ help
61030+ If you say Y here, a random-sized gap will be enforced between allocated
61031+ thread stacks. Glibc's NPTL and other threading libraries that
61032+ pass MAP_STACK to the kernel for thread stack allocation are supported.
61033+ The implementation currently provides 8 bits of entropy for the gap.
61034+
61035+ Many distributions do not compile threaded remote services with the
61036+ -fstack-check argument to GCC, causing the variable-sized stack-based
61037+ allocator, alloca(), to not probe the stack on allocation. This
61038+ permits an unbounded alloca() to skip over any guard page and potentially
61039+ modify another thread's stack reliably. An enforced random gap
61040+ reduces the reliability of such an attack and increases the chance
61041+ that such a read/write to another thread's stack instead lands in
61042+ an unmapped area, causing a crash and triggering grsecurity's
61043+ anti-bruteforcing logic.
61044+
61045+config GRKERNSEC_PROC_MEMMAP
61046+ bool "Harden ASLR against information leaks and entropy reduction"
61047+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
61048+ depends on PAX_NOEXEC || PAX_ASLR
61049+ help
61050+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
61051+ give no information about the addresses of its mappings if
61052+ PaX features that rely on random addresses are enabled on the task.
61053+ In addition to sanitizing this information and disabling other
61054+ dangerous sources of information, this option causes reads of sensitive
61055+ /proc/<pid> entries where the file descriptor was opened in a different
61056+ task than the one performing the read. Such attempts are logged.
61057+ This option also limits argv/env strings for suid/sgid binaries
61058+ to 512KB to prevent a complete exhaustion of the stack entropy provided
61059+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
61060+ binaries to prevent alternative mmap layouts from being abused.
61061+
61062+ If you use PaX it is essential that you say Y here as it closes up
61063+ several holes that make full ASLR useless locally.
61064+
61065+config GRKERNSEC_BRUTE
61066+ bool "Deter exploit bruteforcing"
61067+ default y if GRKERNSEC_CONFIG_AUTO
61068+ help
61069+ If you say Y here, attempts to bruteforce exploits against forking
61070+ daemons such as apache or sshd, as well as against suid/sgid binaries
61071+ will be deterred. When a child of a forking daemon is killed by PaX
61072+ or crashes due to an illegal instruction or other suspicious signal,
61073+ the parent process will be delayed 30 seconds upon every subsequent
61074+ fork until the administrator is able to assess the situation and
61075+ restart the daemon.
61076+ In the suid/sgid case, the attempt is logged, the user has all their
61077+ existing instances of the suid/sgid binary terminated and will
61078+ be unable to execute any suid/sgid binaries for 15 minutes.
61079+
61080+ It is recommended that you also enable signal logging in the auditing
61081+ section so that logs are generated when a process triggers a suspicious
61082+ signal.
61083+ If the sysctl option is enabled, a sysctl option with name
61084+ "deter_bruteforce" is created.
61085+
61086+
61087+config GRKERNSEC_MODHARDEN
61088+ bool "Harden module auto-loading"
61089+ default y if GRKERNSEC_CONFIG_AUTO
61090+ depends on MODULES
61091+ help
61092+ If you say Y here, module auto-loading in response to use of some
61093+ feature implemented by an unloaded module will be restricted to
61094+ root users. Enabling this option helps defend against attacks
61095+ by unprivileged users who abuse the auto-loading behavior to
61096+ cause a vulnerable module to load that is then exploited.
61097+
61098+ If this option prevents a legitimate use of auto-loading for a
61099+ non-root user, the administrator can execute modprobe manually
61100+ with the exact name of the module mentioned in the alert log.
61101+ Alternatively, the administrator can add the module to the list
61102+ of modules loaded at boot by modifying init scripts.
61103+
61104+ Modification of init scripts will most likely be needed on
61105+ Ubuntu servers with encrypted home directory support enabled,
61106+ as the first non-root user logging in will cause the ecb(aes),
61107+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
61108+
61109+config GRKERNSEC_HIDESYM
61110+ bool "Hide kernel symbols"
61111+ default y if GRKERNSEC_CONFIG_AUTO
61112+ select PAX_USERCOPY_SLABS
61113+ help
61114+ If you say Y here, getting information on loaded modules, and
61115+ displaying all kernel symbols through a syscall will be restricted
61116+ to users with CAP_SYS_MODULE. For software compatibility reasons,
61117+ /proc/kallsyms will be restricted to the root user. The RBAC
61118+ system can hide that entry even from root.
61119+
61120+ This option also prevents leaking of kernel addresses through
61121+ several /proc entries.
61122+
61123+ Note that this option is only effective provided the following
61124+ conditions are met:
61125+ 1) The kernel using grsecurity is not precompiled by some distribution
61126+ 2) You have also enabled GRKERNSEC_DMESG
61127+ 3) You are using the RBAC system and hiding other files such as your
61128+ kernel image and System.map. Alternatively, enabling this option
61129+ causes the permissions on /boot, /lib/modules, and the kernel
61130+ source directory to change at compile time to prevent
61131+ reading by non-root users.
61132+ If the above conditions are met, this option will aid in providing a
61133+ useful protection against local kernel exploitation of overflows
61134+ and arbitrary read/write vulnerabilities.
61135+
61136+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
61137+ in addition to this feature.
61138+
61139+config GRKERNSEC_KERN_LOCKOUT
61140+ bool "Active kernel exploit response"
61141+ default y if GRKERNSEC_CONFIG_AUTO
61142+ depends on X86 || ARM || PPC || SPARC
61143+ help
61144+ If you say Y here, when a PaX alert is triggered due to suspicious
61145+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
61146+ or an OOPS occurs due to bad memory accesses, instead of just
61147+ terminating the offending process (and potentially allowing
61148+ a subsequent exploit from the same user), we will take one of two
61149+ actions:
61150+ If the user was root, we will panic the system
61151+ If the user was non-root, we will log the attempt, terminate
61152+ all processes owned by the user, then prevent them from creating
61153+ any new processes until the system is restarted
61154+ This deters repeated kernel exploitation/bruteforcing attempts
61155+ and is useful for later forensics.
61156+
61157+endmenu
61158+menu "Role Based Access Control Options"
61159+depends on GRKERNSEC
61160+
61161+config GRKERNSEC_RBAC_DEBUG
61162+ bool
61163+
61164+config GRKERNSEC_NO_RBAC
61165+ bool "Disable RBAC system"
61166+ help
61167+ If you say Y here, the /dev/grsec device will be removed from the kernel,
61168+ preventing the RBAC system from being enabled. You should only say Y
61169+ here if you have no intention of using the RBAC system, so as to prevent
61170+ an attacker with root access from misusing the RBAC system to hide files
61171+ and processes when loadable module support and /dev/[k]mem have been
61172+ locked down.
61173+
61174+config GRKERNSEC_ACL_HIDEKERN
61175+ bool "Hide kernel processes"
61176+ help
61177+ If you say Y here, all kernel threads will be hidden to all
61178+ processes but those whose subject has the "view hidden processes"
61179+ flag.
61180+
61181+config GRKERNSEC_ACL_MAXTRIES
61182+ int "Maximum tries before password lockout"
61183+ default 3
61184+ help
61185+ This option enforces the maximum number of times a user can attempt
61186+ to authorize themselves with the grsecurity RBAC system before being
61187+ denied the ability to attempt authorization again for a specified time.
61188+ The lower the number, the harder it will be to brute-force a password.
61189+
61190+config GRKERNSEC_ACL_TIMEOUT
61191+ int "Time to wait after max password tries, in seconds"
61192+ default 30
61193+ help
61194+ This option specifies the time the user must wait after attempting to
61195+ authorize to the RBAC system with the maximum number of invalid
61196+ passwords. The higher the number, the harder it will be to brute-force
61197+ a password.
61198+
61199+endmenu
61200+menu "Filesystem Protections"
61201+depends on GRKERNSEC
61202+
61203+config GRKERNSEC_PROC
61204+ bool "Proc restrictions"
61205+ default y if GRKERNSEC_CONFIG_AUTO
61206+ help
61207+ If you say Y here, the permissions of the /proc filesystem
61208+ will be altered to enhance system security and privacy. You MUST
61209+ choose either a user only restriction or a user and group restriction.
61210+ Depending upon the option you choose, you can either restrict users to
61211+ see only the processes they themselves run, or choose a group that can
61212+ view all processes and files normally restricted to root if you choose
61213+ the "restrict to user only" option. NOTE: If you're running identd or
61214+ ntpd as a non-root user, you will have to run it as the group you
61215+ specify here.
61216+
61217+config GRKERNSEC_PROC_USER
61218+ bool "Restrict /proc to user only"
61219+ depends on GRKERNSEC_PROC
61220+ help
61221+ If you say Y here, non-root users will only be able to view their own
61222+ processes, and restricts them from viewing network-related information,
61223+ and viewing kernel symbol and module information.
61224+
61225+config GRKERNSEC_PROC_USERGROUP
61226+ bool "Allow special group"
61227+ default y if GRKERNSEC_CONFIG_AUTO
61228+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
61229+ help
61230+ If you say Y here, you will be able to select a group that will be
61231+ able to view all processes and network-related information. If you've
61232+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
61233+ remain hidden. This option is useful if you want to run identd as
61234+ a non-root user. The group you select may also be chosen at boot time
61235+ via "grsec_proc_gid=" on the kernel commandline.
61236+
61237+config GRKERNSEC_PROC_GID
61238+ int "GID for special group"
61239+ depends on GRKERNSEC_PROC_USERGROUP
61240+ default 1001
61241+
61242+config GRKERNSEC_PROC_ADD
61243+ bool "Additional restrictions"
61244+ default y if GRKERNSEC_CONFIG_AUTO
61245+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
61246+ help
61247+ If you say Y here, additional restrictions will be placed on
61248+ /proc that keep normal users from viewing device information and
61249+ slabinfo information that could be useful for exploits.
61250+
61251+config GRKERNSEC_LINK
61252+ bool "Linking restrictions"
61253+ default y if GRKERNSEC_CONFIG_AUTO
61254+ help
61255+ If you say Y here, /tmp race exploits will be prevented, since users
61256+ will no longer be able to follow symlinks owned by other users in
61257+ world-writable +t directories (e.g. /tmp), unless the owner of the
61258+ symlink is the owner of the directory. users will also not be
61259+ able to hardlink to files they do not own. If the sysctl option is
61260+ enabled, a sysctl option with name "linking_restrictions" is created.
61261+
61262+config GRKERNSEC_SYMLINKOWN
61263+ bool "Kernel-enforced SymlinksIfOwnerMatch"
61264+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
61265+ help
61266+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
61267+ that prevents it from being used as a security feature. As Apache
61268+ verifies the symlink by performing a stat() against the target of
61269+ the symlink before it is followed, an attacker can setup a symlink
61270+ to point to a same-owned file, then replace the symlink with one
61271+ that targets another user's file just after Apache "validates" the
61272+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
61273+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
61274+ will be in place for the group you specify. If the sysctl option
61275+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
61276+ created.
61277+
61278+config GRKERNSEC_SYMLINKOWN_GID
61279+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
61280+ depends on GRKERNSEC_SYMLINKOWN
61281+ default 1006
61282+ help
61283+ Setting this GID determines what group kernel-enforced
61284+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
61285+ is enabled, a sysctl option with name "symlinkown_gid" is created.
61286+
61287+config GRKERNSEC_FIFO
61288+ bool "FIFO restrictions"
61289+ default y if GRKERNSEC_CONFIG_AUTO
61290+ help
61291+ If you say Y here, users will not be able to write to FIFOs they don't
61292+ own in world-writable +t directories (e.g. /tmp), unless the owner of
 61293+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
61294+ option is enabled, a sysctl option with name "fifo_restrictions" is
61295+ created.
61296+
61297+config GRKERNSEC_SYSFS_RESTRICT
61298+ bool "Sysfs/debugfs restriction"
61299+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
61300+ depends on SYSFS
61301+ help
61302+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
61303+ any filesystem normally mounted under it (e.g. debugfs) will be
61304+ mostly accessible only by root. These filesystems generally provide access
61305+ to hardware and debug information that isn't appropriate for unprivileged
61306+ users of the system. Sysfs and debugfs have also become a large source
61307+ of new vulnerabilities, ranging from infoleaks to local compromise.
61308+ There has been very little oversight with an eye toward security involved
61309+ in adding new exporters of information to these filesystems, so their
61310+ use is discouraged.
61311+ For reasons of compatibility, a few directories have been whitelisted
61312+ for access by non-root users:
61313+ /sys/fs/selinux
61314+ /sys/fs/fuse
61315+ /sys/devices/system/cpu
61316+
61317+config GRKERNSEC_ROFS
61318+ bool "Runtime read-only mount protection"
61319+ help
61320+ If you say Y here, a sysctl option with name "romount_protect" will
61321+ be created. By setting this option to 1 at runtime, filesystems
61322+ will be protected in the following ways:
61323+ * No new writable mounts will be allowed
61324+ * Existing read-only mounts won't be able to be remounted read/write
61325+ * Write operations will be denied on all block devices
61326+ This option acts independently of grsec_lock: once it is set to 1,
61327+ it cannot be turned off. Therefore, please be mindful of the resulting
61328+ behavior if this option is enabled in an init script on a read-only
61329+ filesystem. This feature is mainly intended for secure embedded systems.
61330+
61331+config GRKERNSEC_DEVICE_SIDECHANNEL
61332+ bool "Eliminate stat/notify-based device sidechannels"
61333+ default y if GRKERNSEC_CONFIG_AUTO
61334+ help
61335+ If you say Y here, timing analyses on block or character
61336+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
61337+ will be thwarted for unprivileged users. If a process without
61338+ CAP_MKNOD stats such a device, the last access and last modify times
61339+ will match the device's create time. No access or modify events
61340+ will be triggered through inotify/dnotify/fanotify for such devices.
61341+ This feature will prevent attacks that may at a minimum
61342+ allow an attacker to determine the administrator's password length.
61343+
61344+config GRKERNSEC_CHROOT
61345+ bool "Chroot jail restrictions"
61346+ default y if GRKERNSEC_CONFIG_AUTO
61347+ help
61348+ If you say Y here, you will be able to choose several options that will
61349+ make breaking out of a chrooted jail much more difficult. If you
61350+ encounter no software incompatibilities with the following options, it
61351+ is recommended that you enable each one.
61352+
61353+config GRKERNSEC_CHROOT_MOUNT
61354+ bool "Deny mounts"
61355+ default y if GRKERNSEC_CONFIG_AUTO
61356+ depends on GRKERNSEC_CHROOT
61357+ help
61358+ If you say Y here, processes inside a chroot will not be able to
61359+ mount or remount filesystems. If the sysctl option is enabled, a
61360+ sysctl option with name "chroot_deny_mount" is created.
61361+
61362+config GRKERNSEC_CHROOT_DOUBLE
61363+ bool "Deny double-chroots"
61364+ default y if GRKERNSEC_CONFIG_AUTO
61365+ depends on GRKERNSEC_CHROOT
61366+ help
61367+ If you say Y here, processes inside a chroot will not be able to chroot
61368+ again outside the chroot. This is a widely used method of breaking
61369+ out of a chroot jail and should not be allowed. If the sysctl
61370+ option is enabled, a sysctl option with name
61371+ "chroot_deny_chroot" is created.
61372+
61373+config GRKERNSEC_CHROOT_PIVOT
61374+ bool "Deny pivot_root in chroot"
61375+ default y if GRKERNSEC_CONFIG_AUTO
61376+ depends on GRKERNSEC_CHROOT
61377+ help
61378+ If you say Y here, processes inside a chroot will not be able to use
61379+ a function called pivot_root() that was introduced in Linux 2.3.41. It
61380+ works similar to chroot in that it changes the root filesystem. This
61381+ function could be misused in a chrooted process to attempt to break out
61382+ of the chroot, and therefore should not be allowed. If the sysctl
61383+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
61384+ created.
61385+
61386+config GRKERNSEC_CHROOT_CHDIR
61387+ bool "Enforce chdir(\"/\") on all chroots"
61388+ default y if GRKERNSEC_CONFIG_AUTO
61389+ depends on GRKERNSEC_CHROOT
61390+ help
61391+ If you say Y here, the current working directory of all newly-chrooted
 61392+ applications will be set to the root directory of the chroot.
61393+ The man page on chroot(2) states:
61394+ Note that this call does not change the current working
61395+ directory, so that `.' can be outside the tree rooted at
61396+ `/'. In particular, the super-user can escape from a
61397+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
61398+
61399+ It is recommended that you say Y here, since it's not known to break
61400+ any software. If the sysctl option is enabled, a sysctl option with
61401+ name "chroot_enforce_chdir" is created.
61402+
61403+config GRKERNSEC_CHROOT_CHMOD
61404+ bool "Deny (f)chmod +s"
61405+ default y if GRKERNSEC_CONFIG_AUTO
61406+ depends on GRKERNSEC_CHROOT
61407+ help
61408+ If you say Y here, processes inside a chroot will not be able to chmod
61409+ or fchmod files to make them have suid or sgid bits. This protects
61410+ against another published method of breaking a chroot. If the sysctl
61411+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
61412+ created.
61413+
61414+config GRKERNSEC_CHROOT_FCHDIR
61415+ bool "Deny fchdir out of chroot"
61416+ default y if GRKERNSEC_CONFIG_AUTO
61417+ depends on GRKERNSEC_CHROOT
61418+ help
61419+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
61420+ to a file descriptor of the chrooting process that points to a directory
61421+ outside the filesystem will be stopped. If the sysctl option
61422+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
61423+
61424+config GRKERNSEC_CHROOT_MKNOD
61425+ bool "Deny mknod"
61426+ default y if GRKERNSEC_CONFIG_AUTO
61427+ depends on GRKERNSEC_CHROOT
61428+ help
61429+ If you say Y here, processes inside a chroot will not be allowed to
61430+ mknod. The problem with using mknod inside a chroot is that it
61431+ would allow an attacker to create a device entry that is the same
 61432+ as one on the physical root of your system, which could be
61433+ anything from the console device to a device for your harddrive (which
61434+ they could then use to wipe the drive or steal data). It is recommended
61435+ that you say Y here, unless you run into software incompatibilities.
61436+ If the sysctl option is enabled, a sysctl option with name
61437+ "chroot_deny_mknod" is created.
61438+
61439+config GRKERNSEC_CHROOT_SHMAT
61440+ bool "Deny shmat() out of chroot"
61441+ default y if GRKERNSEC_CONFIG_AUTO
61442+ depends on GRKERNSEC_CHROOT
61443+ help
61444+ If you say Y here, processes inside a chroot will not be able to attach
61445+ to shared memory segments that were created outside of the chroot jail.
61446+ It is recommended that you say Y here. If the sysctl option is enabled,
61447+ a sysctl option with name "chroot_deny_shmat" is created.
61448+
61449+config GRKERNSEC_CHROOT_UNIX
61450+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
61451+ default y if GRKERNSEC_CONFIG_AUTO
61452+ depends on GRKERNSEC_CHROOT
61453+ help
61454+ If you say Y here, processes inside a chroot will not be able to
61455+ connect to abstract (meaning not belonging to a filesystem) Unix
61456+ domain sockets that were bound outside of a chroot. It is recommended
61457+ that you say Y here. If the sysctl option is enabled, a sysctl option
61458+ with name "chroot_deny_unix" is created.
61459+
61460+config GRKERNSEC_CHROOT_FINDTASK
61461+ bool "Protect outside processes"
61462+ default y if GRKERNSEC_CONFIG_AUTO
61463+ depends on GRKERNSEC_CHROOT
61464+ help
61465+ If you say Y here, processes inside a chroot will not be able to
61466+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
61467+ getsid, or view any process outside of the chroot. If the sysctl
61468+ option is enabled, a sysctl option with name "chroot_findtask" is
61469+ created.
61470+
61471+config GRKERNSEC_CHROOT_NICE
61472+ bool "Restrict priority changes"
61473+ default y if GRKERNSEC_CONFIG_AUTO
61474+ depends on GRKERNSEC_CHROOT
61475+ help
61476+ If you say Y here, processes inside a chroot will not be able to raise
61477+ the priority of processes in the chroot, or alter the priority of
61478+ processes outside the chroot. This provides more security than simply
61479+ removing CAP_SYS_NICE from the process' capability set. If the
61480+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
61481+ is created.
61482+
61483+config GRKERNSEC_CHROOT_SYSCTL
61484+ bool "Deny sysctl writes"
61485+ default y if GRKERNSEC_CONFIG_AUTO
61486+ depends on GRKERNSEC_CHROOT
61487+ help
61488+ If you say Y here, an attacker in a chroot will not be able to
61489+ write to sysctl entries, either by sysctl(2) or through a /proc
61490+ interface. It is strongly recommended that you say Y here. If the
61491+ sysctl option is enabled, a sysctl option with name
61492+ "chroot_deny_sysctl" is created.
61493+
61494+config GRKERNSEC_CHROOT_CAPS
61495+ bool "Capability restrictions"
61496+ default y if GRKERNSEC_CONFIG_AUTO
61497+ depends on GRKERNSEC_CHROOT
61498+ help
61499+ If you say Y here, the capabilities on all processes within a
61500+ chroot jail will be lowered to stop module insertion, raw i/o,
61501+ system and net admin tasks, rebooting the system, modifying immutable
61502+ files, modifying IPC owned by another, and changing the system time.
61503+ This is left an option because it can break some apps. Disable this
61504+ if your chrooted apps are having problems performing those kinds of
61505+ tasks. If the sysctl option is enabled, a sysctl option with
61506+ name "chroot_caps" is created.
61507+
61508+config GRKERNSEC_CHROOT_INITRD
61509+ bool "Exempt initrd tasks from restrictions"
61510+ default y if GRKERNSEC_CONFIG_AUTO
61511+ depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
61512+ help
61513+ If you say Y here, tasks started prior to init will be exempted from
61514+ grsecurity's chroot restrictions. This option is mainly meant to
61515+ resolve Plymouth's performing privileged operations unnecessarily
61516+ in a chroot.
61517+
61518+endmenu
61519+menu "Kernel Auditing"
61520+depends on GRKERNSEC
61521+
61522+config GRKERNSEC_AUDIT_GROUP
61523+ bool "Single group for auditing"
61524+ help
61525+ If you say Y here, the exec and chdir logging features will only operate
61526+ on a group you specify. This option is recommended if you only want to
61527+ watch certain users instead of having a large amount of logs from the
61528+ entire system. If the sysctl option is enabled, a sysctl option with
61529+ name "audit_group" is created.
61530+
61531+config GRKERNSEC_AUDIT_GID
61532+ int "GID for auditing"
61533+ depends on GRKERNSEC_AUDIT_GROUP
61534+ default 1007
61535+
61536+config GRKERNSEC_EXECLOG
61537+ bool "Exec logging"
61538+ help
61539+ If you say Y here, all execve() calls will be logged (since the
61540+ other exec*() calls are frontends to execve(), all execution
61541+ will be logged). Useful for shell-servers that like to keep track
61542+ of their users. If the sysctl option is enabled, a sysctl option with
61543+ name "exec_logging" is created.
61544+ WARNING: This option when enabled will produce a LOT of logs, especially
61545+ on an active system.
61546+
61547+config GRKERNSEC_RESLOG
61548+ bool "Resource logging"
61549+ default y if GRKERNSEC_CONFIG_AUTO
61550+ help
61551+ If you say Y here, all attempts to overstep resource limits will
61552+ be logged with the resource name, the requested size, and the current
61553+ limit. It is highly recommended that you say Y here. If the sysctl
61554+ option is enabled, a sysctl option with name "resource_logging" is
61555+ created. If the RBAC system is enabled, the sysctl value is ignored.
61556+
61557+config GRKERNSEC_CHROOT_EXECLOG
61558+ bool "Log execs within chroot"
61559+ help
61560+ If you say Y here, all executions inside a chroot jail will be logged
61561+ to syslog. This can cause a large amount of logs if certain
61562+ applications (eg. djb's daemontools) are installed on the system, and
61563+ is therefore left as an option. If the sysctl option is enabled, a
61564+ sysctl option with name "chroot_execlog" is created.
61565+
61566+config GRKERNSEC_AUDIT_PTRACE
61567+ bool "Ptrace logging"
61568+ help
61569+ If you say Y here, all attempts to attach to a process via ptrace
61570+ will be logged. If the sysctl option is enabled, a sysctl option
61571+ with name "audit_ptrace" is created.
61572+
61573+config GRKERNSEC_AUDIT_CHDIR
61574+ bool "Chdir logging"
61575+ help
61576+ If you say Y here, all chdir() calls will be logged. If the sysctl
61577+ option is enabled, a sysctl option with name "audit_chdir" is created.
61578+
61579+config GRKERNSEC_AUDIT_MOUNT
61580+ bool "(Un)Mount logging"
61581+ help
61582+ If you say Y here, all mounts and unmounts will be logged. If the
61583+ sysctl option is enabled, a sysctl option with name "audit_mount" is
61584+ created.
61585+
61586+config GRKERNSEC_SIGNAL
61587+ bool "Signal logging"
61588+ default y if GRKERNSEC_CONFIG_AUTO
61589+ help
61590+ If you say Y here, certain important signals will be logged, such as
61591+ SIGSEGV, which will as a result inform you of when a error in a program
61592+ occurred, which in some cases could mean a possible exploit attempt.
61593+ If the sysctl option is enabled, a sysctl option with name
61594+ "signal_logging" is created.
61595+
61596+config GRKERNSEC_FORKFAIL
61597+ bool "Fork failure logging"
61598+ help
61599+ If you say Y here, all failed fork() attempts will be logged.
61600+ This could suggest a fork bomb, or someone attempting to overstep
61601+ their process limit. If the sysctl option is enabled, a sysctl option
61602+ with name "forkfail_logging" is created.
61603+
61604+config GRKERNSEC_TIME
61605+ bool "Time change logging"
61606+ default y if GRKERNSEC_CONFIG_AUTO
61607+ help
61608+ If you say Y here, any changes of the system clock will be logged.
61609+ If the sysctl option is enabled, a sysctl option with name
61610+ "timechange_logging" is created.
61611+
61612+config GRKERNSEC_PROC_IPADDR
61613+ bool "/proc/<pid>/ipaddr support"
61614+ default y if GRKERNSEC_CONFIG_AUTO
61615+ help
61616+ If you say Y here, a new entry will be added to each /proc/<pid>
61617+ directory that contains the IP address of the person using the task.
61618+ The IP is carried across local TCP and AF_UNIX stream sockets.
61619+ This information can be useful for IDS/IPSes to perform remote response
61620+ to a local attack. The entry is readable by only the owner of the
61621+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
61622+ the RBAC system), and thus does not create privacy concerns.
61623+
61624+config GRKERNSEC_RWXMAP_LOG
61625+ bool 'Denied RWX mmap/mprotect logging'
61626+ default y if GRKERNSEC_CONFIG_AUTO
61627+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
61628+ help
61629+ If you say Y here, calls to mmap() and mprotect() with explicit
61630+ usage of PROT_WRITE and PROT_EXEC together will be logged when
61631+ denied by the PAX_MPROTECT feature. This feature will also
61632+ log other problematic scenarios that can occur when PAX_MPROTECT
61633+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
61634+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
61635+ is created.
61636+
61637+endmenu
61638+
61639+menu "Executable Protections"
61640+depends on GRKERNSEC
61641+
61642+config GRKERNSEC_DMESG
61643+ bool "Dmesg(8) restriction"
61644+ default y if GRKERNSEC_CONFIG_AUTO
61645+ help
61646+ If you say Y here, non-root users will not be able to use dmesg(8)
61647+ to view the contents of the kernel's circular log buffer.
61648+ The kernel's log buffer often contains kernel addresses and other
61649+ identifying information useful to an attacker in fingerprinting a
61650+ system for a targeted exploit.
61651+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
61652+ created.
61653+
61654+config GRKERNSEC_HARDEN_PTRACE
61655+ bool "Deter ptrace-based process snooping"
61656+ default y if GRKERNSEC_CONFIG_AUTO
61657+ help
61658+ If you say Y here, TTY sniffers and other malicious monitoring
61659+ programs implemented through ptrace will be defeated. If you
61660+ have been using the RBAC system, this option has already been
61661+ enabled for several years for all users, with the ability to make
61662+ fine-grained exceptions.
61663+
61664+ This option only affects the ability of non-root users to ptrace
61665+ processes that are not a descendent of the ptracing process.
61666+ This means that strace ./binary and gdb ./binary will still work,
61667+ but attaching to arbitrary processes will not. If the sysctl
61668+ option is enabled, a sysctl option with name "harden_ptrace" is
61669+ created.
61670+
61671+config GRKERNSEC_PTRACE_READEXEC
61672+ bool "Require read access to ptrace sensitive binaries"
61673+ default y if GRKERNSEC_CONFIG_AUTO
61674+ help
61675+ If you say Y here, unprivileged users will not be able to ptrace unreadable
61676+ binaries. This option is useful in environments that
61677+ remove the read bits (e.g. file mode 4711) from suid binaries to
61678+ prevent infoleaking of their contents. This option adds
61679+ consistency to the use of that file mode, as the binary could normally
61680+ be read out when run without privileges while ptracing.
61681+
61682+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
61683+ is created.
61684+
61685+config GRKERNSEC_SETXID
61686+ bool "Enforce consistent multithreaded privileges"
61687+ default y if GRKERNSEC_CONFIG_AUTO
61688+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
61689+ help
61690+ If you say Y here, a change from a root uid to a non-root uid
61691+ in a multithreaded application will cause the resulting uids,
61692+ gids, supplementary groups, and capabilities in that thread
61693+ to be propagated to the other threads of the process. In most
61694+ cases this is unnecessary, as glibc will emulate this behavior
61695+ on behalf of the application. Other libcs do not act in the
61696+ same way, allowing the other threads of the process to continue
61697+ running with root privileges. If the sysctl option is enabled,
61698+ a sysctl option with name "consistent_setxid" is created.
61699+
61700+config GRKERNSEC_TPE
61701+ bool "Trusted Path Execution (TPE)"
61702+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
61703+ help
61704+ If you say Y here, you will be able to choose a gid to add to the
61705+ supplementary groups of users you want to mark as "untrusted."
61706+ These users will not be able to execute any files that are not in
61707+ root-owned directories writable only by root. If the sysctl option
61708+ is enabled, a sysctl option with name "tpe" is created.
61709+
61710+config GRKERNSEC_TPE_ALL
61711+ bool "Partially restrict all non-root users"
61712+ depends on GRKERNSEC_TPE
61713+ help
61714+ If you say Y here, all non-root users will be covered under
61715+ a weaker TPE restriction. This is separate from, and in addition to,
61716+ the main TPE options that you have selected elsewhere. Thus, if a
61717+ "trusted" GID is chosen, this restriction applies to even that GID.
61718+ Under this restriction, all non-root users will only be allowed to
61719+ execute files in directories they own that are not group or
61720+ world-writable, or in directories owned by root and writable only by
61721+ root. If the sysctl option is enabled, a sysctl option with name
61722+ "tpe_restrict_all" is created.
61723+
61724+config GRKERNSEC_TPE_INVERT
61725+ bool "Invert GID option"
61726+ depends on GRKERNSEC_TPE
61727+ help
61728+ If you say Y here, the group you specify in the TPE configuration will
61729+ decide what group TPE restrictions will be *disabled* for. This
61730+ option is useful if you want TPE restrictions to be applied to most
61731+ users on the system. If the sysctl option is enabled, a sysctl option
61732+ with name "tpe_invert" is created. Unlike other sysctl options, this
61733+ entry will default to on for backward-compatibility.
61734+
61735+config GRKERNSEC_TPE_GID
61736+ int
61737+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
61738+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
61739+
61740+config GRKERNSEC_TPE_UNTRUSTED_GID
61741+ int "GID for TPE-untrusted users"
61742+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
61743+ default 1005
61744+ help
61745+ Setting this GID determines what group TPE restrictions will be
61746+ *enabled* for. If the sysctl option is enabled, a sysctl option
61747+ with name "tpe_gid" is created.
61748+
61749+config GRKERNSEC_TPE_TRUSTED_GID
61750+ int "GID for TPE-trusted users"
61751+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
61752+ default 1005
61753+ help
61754+ Setting this GID determines what group TPE restrictions will be
61755+ *disabled* for. If the sysctl option is enabled, a sysctl option
61756+ with name "tpe_gid" is created.
61757+
61758+endmenu
61759+menu "Network Protections"
61760+depends on GRKERNSEC
61761+
61762+config GRKERNSEC_RANDNET
61763+ bool "Larger entropy pools"
61764+ default y if GRKERNSEC_CONFIG_AUTO
61765+ help
61766+ If you say Y here, the entropy pools used for many features of Linux
61767+ and grsecurity will be doubled in size. Since several grsecurity
61768+ features use additional randomness, it is recommended that you say Y
61769+ here. Saying Y here has a similar effect as modifying
61770+ /proc/sys/kernel/random/poolsize.
61771+
61772+config GRKERNSEC_BLACKHOLE
61773+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
61774+ default y if GRKERNSEC_CONFIG_AUTO
61775+ depends on NET
61776+ help
61777+ If you say Y here, neither TCP resets nor ICMP
61778+ destination-unreachable packets will be sent in response to packets
61779+ sent to ports for which no associated listening process exists.
61780+ This feature supports both IPV4 and IPV6 and exempts the
61781+ loopback interface from blackholing. Enabling this feature
61782+ makes a host more resilient to DoS attacks and reduces network
61783+ visibility against scanners.
61784+
61785+ The blackhole feature as-implemented is equivalent to the FreeBSD
61786+ blackhole feature, as it prevents RST responses to all packets, not
61787+ just SYNs. Under most application behavior this causes no
61788+ problems, but applications (like haproxy) may not close certain
61789+ connections in a way that cleanly terminates them on the remote
61790+ end, leaving the remote host in LAST_ACK state. Because of this
61791+ side-effect and to prevent intentional LAST_ACK DoSes, this
61792+ feature also adds automatic mitigation against such attacks.
61793+ The mitigation drastically reduces the amount of time a socket
61794+ can spend in LAST_ACK state. If you're using haproxy and not
61795+ all servers it connects to have this option enabled, consider
61796+ disabling this feature on the haproxy host.
61797+
61798+ If the sysctl option is enabled, two sysctl options with names
61799+ "ip_blackhole" and "lastack_retries" will be created.
61800+ While "ip_blackhole" takes the standard zero/non-zero on/off
61801+ toggle, "lastack_retries" uses the same kinds of values as
61802+ "tcp_retries1" and "tcp_retries2". The default value of 4
61803+ prevents a socket from lasting more than 45 seconds in LAST_ACK
61804+ state.
61805+
61806+config GRKERNSEC_NO_SIMULT_CONNECT
61807+ bool "Disable TCP Simultaneous Connect"
61808+ default y if GRKERNSEC_CONFIG_AUTO
61809+ depends on NET
61810+ help
61811+ If you say Y here, a feature by Willy Tarreau will be enabled that
61812+ removes a weakness in Linux's strict implementation of TCP that
61813+ allows two clients to connect to each other without either entering
61814+ a listening state. The weakness allows an attacker to easily prevent
61815+ a client from connecting to a known server provided the source port
61816+ for the connection is guessed correctly.
61817+
61818+ As the weakness could be used to prevent an antivirus or IPS from
61819+ fetching updates, or prevent an SSL gateway from fetching a CRL,
61820+ it should be eliminated by enabling this option. Though Linux is
61821+ one of few operating systems supporting simultaneous connect, it
61822+ has no legitimate use in practice and is rarely supported by firewalls.
61823+
61824+config GRKERNSEC_SOCKET
61825+ bool "Socket restrictions"
61826+ depends on NET
61827+ help
61828+ If you say Y here, you will be able to choose from several options.
61829+ If you assign a GID on your system and add it to the supplementary
61830+ groups of users you want to restrict socket access to, this patch
61831+ will perform up to three things, based on the option(s) you choose.
61832+
61833+config GRKERNSEC_SOCKET_ALL
61834+ bool "Deny any sockets to group"
61835+ depends on GRKERNSEC_SOCKET
61836+ help
61837+ If you say Y here, you will be able to choose a GID of whose users will
61838+ be unable to connect to other hosts from your machine or run server
61839+ applications from your machine. If the sysctl option is enabled, a
61840+ sysctl option with name "socket_all" is created.
61841+
61842+config GRKERNSEC_SOCKET_ALL_GID
61843+ int "GID to deny all sockets for"
61844+ depends on GRKERNSEC_SOCKET_ALL
61845+ default 1004
61846+ help
61847+ Here you can choose the GID to disable socket access for. Remember to
61848+ add the users you want socket access disabled for to the GID
61849+ specified here. If the sysctl option is enabled, a sysctl option
61850+ with name "socket_all_gid" is created.
61851+
61852+config GRKERNSEC_SOCKET_CLIENT
61853+ bool "Deny client sockets to group"
61854+ depends on GRKERNSEC_SOCKET
61855+ help
61856+ If you say Y here, you will be able to choose a GID of whose users will
61857+ be unable to connect to other hosts from your machine, but will be
61858+ able to run servers. If this option is enabled, all users in the group
61859+ you specify will have to use passive mode when initiating ftp transfers
61860+ from the shell on your machine. If the sysctl option is enabled, a
61861+ sysctl option with name "socket_client" is created.
61862+
61863+config GRKERNSEC_SOCKET_CLIENT_GID
61864+ int "GID to deny client sockets for"
61865+ depends on GRKERNSEC_SOCKET_CLIENT
61866+ default 1003
61867+ help
61868+ Here you can choose the GID to disable client socket access for.
61869+ Remember to add the users you want client socket access disabled for to
61870+ the GID specified here. If the sysctl option is enabled, a sysctl
61871+ option with name "socket_client_gid" is created.
61872+
61873+config GRKERNSEC_SOCKET_SERVER
61874+ bool "Deny server sockets to group"
61875+ depends on GRKERNSEC_SOCKET
61876+ help
61877+ If you say Y here, you will be able to choose a GID of whose users will
61878+ be unable to run server applications from your machine. If the sysctl
61879+ option is enabled, a sysctl option with name "socket_server" is created.
61880+
61881+config GRKERNSEC_SOCKET_SERVER_GID
61882+ int "GID to deny server sockets for"
61883+ depends on GRKERNSEC_SOCKET_SERVER
61884+ default 1002
61885+ help
61886+ Here you can choose the GID to disable server socket access for.
61887+ Remember to add the users you want server socket access disabled for to
61888+ the GID specified here. If the sysctl option is enabled, a sysctl
61889+ option with name "socket_server_gid" is created.
61890+
61891+endmenu
e2b79cd1
AF
61892+
61893+menu "Physical Protections"
61894+depends on GRKERNSEC
61895+
61896+config GRKERNSEC_DENYUSB
61897+ bool "Deny new USB connections after toggle"
61898+ default y if GRKERNSEC_CONFIG_AUTO
61899+ help
61900+ If you say Y here, a new sysctl option with name "deny_new_usb"
61901+ will be created. Setting its value to 1 will prevent any new
61902+ USB devices from being recognized by the OS. Any attempted USB
61903+ device insertion will be logged. This option is intended to be
61904+ used against custom USB devices designed to exploit vulnerabilities
61905+ in various USB device drivers.
61906+
61907+ For greatest effectiveness, this sysctl should be set after any
61908+ relevant init scripts. Once set, it cannot be unset.
61909+
61910+endmenu
61911+
bb5f0bf8
AF
61912+menu "Sysctl Support"
61913+depends on GRKERNSEC && SYSCTL
61914+
61915+config GRKERNSEC_SYSCTL
61916+ bool "Sysctl support"
61917+ default y if GRKERNSEC_CONFIG_AUTO
61918+ help
61919+ If you say Y here, you will be able to change the options that
61920+ grsecurity runs with at bootup, without having to recompile your
61921+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
61922+ to enable (1) or disable (0) various features. All the sysctl entries
61923+ are mutable until the "grsec_lock" entry is set to a non-zero value.
61924+ All features enabled in the kernel configuration are disabled at boot
61925+ if you do not say Y to the "Turn on features by default" option.
61926+ All options should be set at startup, and the grsec_lock entry should
61927+ be set to a non-zero value after all the options are set.
61928+ *THIS IS EXTREMELY IMPORTANT*
61929+
61930+config GRKERNSEC_SYSCTL_DISTRO
61931+ bool "Extra sysctl support for distro makers (READ HELP)"
61932+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
61933+ help
61934+ If you say Y here, additional sysctl options will be created
61935+ for features that affect processes running as root. Therefore,
61936+ it is critical when using this option that the grsec_lock entry be
61937+ enabled after boot. Only distros with prebuilt kernel packages
61938+ with this option enabled that can ensure grsec_lock is enabled
61939+ after boot should use this option.
61940+ *Failure to set grsec_lock after boot makes all grsec features
61941+ this option covers useless*
61942+
61943+ Currently this option creates the following sysctl entries:
61944+ "Disable Privileged I/O": "disable_priv_io"
61945+
61946+config GRKERNSEC_SYSCTL_ON
61947+ bool "Turn on features by default"
61948+ default y if GRKERNSEC_CONFIG_AUTO
61949+ depends on GRKERNSEC_SYSCTL
61950+ help
61951+ If you say Y here, instead of having all features enabled in the
61952+ kernel configuration disabled at boot time, the features will be
61953+ enabled at boot time. It is recommended you say Y here unless
61954+ there is some reason you would want all sysctl-tunable features to
61955+ be disabled by default. As mentioned elsewhere, it is important
61956+ to enable the grsec_lock entry once you have finished modifying
61957+ the sysctl entries.
61958+
61959+endmenu
61960+menu "Logging Options"
61961+depends on GRKERNSEC
61962+
61963+config GRKERNSEC_FLOODTIME
61964+ int "Seconds in between log messages (minimum)"
61965+ default 10
61966+ help
61967+ This option allows you to enforce the number of seconds between
61968+ grsecurity log messages. The default should be suitable for most
61969+ people, however, if you choose to change it, choose a value small enough
61970+ to allow informative logs to be produced, but large enough to
61971+ prevent flooding.
61972+
61973+config GRKERNSEC_FLOODBURST
61974+ int "Number of messages in a burst (maximum)"
61975+ default 6
61976+ help
61977+ This option allows you to choose the maximum number of messages allowed
61978+ within the flood time interval you chose in a separate option. The
61979+ default should be suitable for most people, however if you find that
61980+ many of your logs are being interpreted as flooding, you may want to
61981+ raise this value.
61982+
61983+endmenu
61984diff --git a/grsecurity/Makefile b/grsecurity/Makefile
61985new file mode 100644
e2b79cd1 61986index 0000000..b0b77d5
bb5f0bf8
AF
61987--- /dev/null
61988+++ b/grsecurity/Makefile
e2b79cd1 61989@@ -0,0 +1,43 @@
bb5f0bf8
AF
61990+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
61991+# during 2001-2009 it has been completely redesigned by Brad Spengler
61992+# into an RBAC system
61993+#
61994+# All code in this directory and various hooks inserted throughout the kernel
61995+# are copyright Brad Spengler - Open Source Security, Inc., and released
61996+# under the GPL v2 or higher
61997+
61998+KBUILD_CFLAGS += -Werror
61999+
62000+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
62001+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
e2b79cd1
AF
62002+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
62003+ grsec_usb.o
bb5f0bf8
AF
62004+
62005+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
62006+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
62007+ gracl_learn.o grsec_log.o
62008+ifdef CONFIG_COMPAT
62009+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
62010+endif
62011+
62012+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
62013+
62014+ifdef CONFIG_NET
62015+obj-y += grsec_sock.o
62016+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
62017+endif
62018+
62019+ifndef CONFIG_GRKERNSEC
62020+obj-y += grsec_disabled.o
62021+endif
62022+
62023+ifdef CONFIG_GRKERNSEC_HIDESYM
62024+extra-y := grsec_hidesym.o
62025+$(obj)/grsec_hidesym.o:
62026+ @-chmod -f 500 /boot
62027+ @-chmod -f 500 /lib/modules
62028+ @-chmod -f 500 /lib64/modules
62029+ @-chmod -f 500 /lib32/modules
62030+ @-chmod -f 700 .
62031+ @echo ' grsec: protected kernel image paths'
62032+endif
62033diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
62034new file mode 100644
62035index 0000000..c0793fd
62036--- /dev/null
62037+++ b/grsecurity/gracl.c
62038@@ -0,0 +1,4178 @@
62039+#include <linux/kernel.h>
62040+#include <linux/module.h>
62041+#include <linux/sched.h>
62042+#include <linux/mm.h>
62043+#include <linux/file.h>
62044+#include <linux/fs.h>
62045+#include <linux/namei.h>
62046+#include <linux/mount.h>
62047+#include <linux/tty.h>
62048+#include <linux/proc_fs.h>
62049+#include <linux/lglock.h>
62050+#include <linux/slab.h>
62051+#include <linux/vmalloc.h>
62052+#include <linux/types.h>
62053+#include <linux/sysctl.h>
62054+#include <linux/netdevice.h>
62055+#include <linux/ptrace.h>
62056+#include <linux/gracl.h>
62057+#include <linux/gralloc.h>
62058+#include <linux/security.h>
62059+#include <linux/grinternal.h>
62060+#include <linux/pid_namespace.h>
62061+#include <linux/stop_machine.h>
62062+#include <linux/fdtable.h>
62063+#include <linux/percpu.h>
62064+#include <linux/lglock.h>
62065+#include <linux/hugetlb.h>
62066+#include <linux/posix-timers.h>
62067+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
62068+#include <linux/magic.h>
62069+#include <linux/pagemap.h>
62070+#include "../fs/btrfs/async-thread.h"
62071+#include "../fs/btrfs/ctree.h"
62072+#include "../fs/btrfs/btrfs_inode.h"
62073+#endif
62074+#include "../fs/mount.h"
62075+
62076+#include <asm/uaccess.h>
62077+#include <asm/errno.h>
62078+#include <asm/mman.h>
62079+
62080+extern struct lglock vfsmount_lock;
62081+
62082+static struct acl_role_db acl_role_set;
62083+static struct name_db name_set;
62084+static struct inodev_db inodev_set;
62085+
62086+/* for keeping track of userspace pointers used for subjects, so we
62087+ can share references in the kernel as well
62088+*/
62089+
62090+static struct path real_root;
62091+
62092+static struct acl_subj_map_db subj_map_set;
62093+
62094+static struct acl_role_label *default_role;
62095+
62096+static struct acl_role_label *role_list;
62097+
62098+static u16 acl_sp_role_value;
62099+
62100+extern char *gr_shared_page[4];
62101+static DEFINE_MUTEX(gr_dev_mutex);
62102+DEFINE_RWLOCK(gr_inode_lock);
62103+
62104+struct gr_arg *gr_usermode;
62105+
62106+static unsigned int gr_status __read_only = GR_STATUS_INIT;
62107+
62108+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
62109+extern void gr_clear_learn_entries(void);
62110+
62111+unsigned char *gr_system_salt;
62112+unsigned char *gr_system_sum;
62113+
62114+static struct sprole_pw **acl_special_roles = NULL;
62115+static __u16 num_sprole_pws = 0;
62116+
62117+static struct acl_role_label *kernel_role = NULL;
62118+
62119+static unsigned int gr_auth_attempts = 0;
62120+static unsigned long gr_auth_expires = 0UL;
62121+
62122+#ifdef CONFIG_NET
62123+extern struct vfsmount *sock_mnt;
62124+#endif
62125+
62126+extern struct vfsmount *pipe_mnt;
62127+extern struct vfsmount *shm_mnt;
62128+
62129+#ifdef CONFIG_HUGETLBFS
62130+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
62131+#endif
62132+
62133+static struct acl_object_label *fakefs_obj_rw;
62134+static struct acl_object_label *fakefs_obj_rwx;
62135+
62136+extern int gr_init_uidset(void);
62137+extern void gr_free_uidset(void);
62138+extern void gr_remove_uid(uid_t uid);
62139+extern int gr_find_uid(uid_t uid);
62140+
62141+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
62142+{
62143+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
62144+ return -EFAULT;
62145+
62146+ return 0;
62147+}
62148+
62149+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
62150+{
62151+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
62152+ return -EFAULT;
62153+
62154+ return 0;
62155+}
62156+
62157+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
62158+{
62159+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
62160+ return -EFAULT;
62161+
62162+ return 0;
62163+}
62164+
62165+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
62166+{
62167+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
62168+ return -EFAULT;
62169+
62170+ return 0;
62171+}
62172+
62173+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
62174+{
62175+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
62176+ return -EFAULT;
62177+
62178+ return 0;
62179+}
62180+
62181+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
62182+{
62183+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
62184+ return -EFAULT;
62185+
62186+ return 0;
62187+}
62188+
62189+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
62190+{
62191+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
62192+ return -EFAULT;
62193+
62194+ return 0;
62195+}
62196+
62197+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
62198+{
62199+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
62200+ return -EFAULT;
62201+
62202+ return 0;
62203+}
62204+
62205+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
62206+{
62207+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
62208+ return -EFAULT;
62209+
62210+ return 0;
62211+}
62212+
62213+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
62214+{
62215+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
62216+ return -EFAULT;
62217+
62218+ if ((uwrap->version != GRSECURITY_VERSION) || (uwrap->size != sizeof(struct gr_arg)))
62219+ return -EINVAL;
62220+
62221+ return 0;
62222+}
62223+
62224+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
62225+{
62226+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
62227+ return -EFAULT;
62228+
62229+ return 0;
62230+}
62231+
62232+static size_t get_gr_arg_wrapper_size_normal(void)
62233+{
62234+ return sizeof(struct gr_arg_wrapper);
62235+}
62236+
62237+#ifdef CONFIG_COMPAT
62238+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
62239+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
62240+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
62241+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
62242+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
62243+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
62244+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
62245+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
62246+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
62247+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
62248+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
62249+extern size_t get_gr_arg_wrapper_size_compat(void);
62250+
62251+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
62252+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
62253+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
62254+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
62255+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
62256+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
62257+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
62258+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
62259+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
62260+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
62261+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
62262+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
62263+
62264+#else
62265+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
62266+#define copy_gr_arg copy_gr_arg_normal
62267+#define copy_gr_hash_struct copy_gr_hash_struct_normal
62268+#define copy_acl_object_label copy_acl_object_label_normal
62269+#define copy_acl_subject_label copy_acl_subject_label_normal
62270+#define copy_acl_role_label copy_acl_role_label_normal
62271+#define copy_acl_ip_label copy_acl_ip_label_normal
62272+#define copy_pointer_from_array copy_pointer_from_array_normal
62273+#define copy_sprole_pw copy_sprole_pw_normal
62274+#define copy_role_transition copy_role_transition_normal
62275+#define copy_role_allowed_ip copy_role_allowed_ip_normal
62276+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
62277+#endif
62278+
62279+__inline__ int
62280+gr_acl_is_enabled(void)
62281+{
62282+ return (gr_status & GR_READY);
62283+}
62284+
62285+static inline dev_t __get_dev(const struct dentry *dentry)
62286+{
62287+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
62288+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
62289+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
62290+ else
62291+#endif
62292+ return dentry->d_sb->s_dev;
62293+}
62294+
62295+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62296+{
62297+ return __get_dev(dentry);
62298+}
62299+
62300+static char gr_task_roletype_to_char(struct task_struct *task)
62301+{
62302+ switch (task->role->roletype &
62303+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
62304+ GR_ROLE_SPECIAL)) {
62305+ case GR_ROLE_DEFAULT:
62306+ return 'D';
62307+ case GR_ROLE_USER:
62308+ return 'U';
62309+ case GR_ROLE_GROUP:
62310+ return 'G';
62311+ case GR_ROLE_SPECIAL:
62312+ return 'S';
62313+ }
62314+
62315+ return 'X';
62316+}
62317+
62318+char gr_roletype_to_char(void)
62319+{
62320+ return gr_task_roletype_to_char(current);
62321+}
62322+
62323+__inline__ int
62324+gr_acl_tpe_check(void)
62325+{
62326+ if (unlikely(!(gr_status & GR_READY)))
62327+ return 0;
62328+ if (current->role->roletype & GR_ROLE_TPE)
62329+ return 1;
62330+ else
62331+ return 0;
62332+}
62333+
62334+int
62335+gr_handle_rawio(const struct inode *inode)
62336+{
62337+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62338+ if (inode && S_ISBLK(inode->i_mode) &&
62339+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62340+ !capable(CAP_SYS_RAWIO))
62341+ return 1;
62342+#endif
62343+ return 0;
62344+}
62345+
62346+static int
62347+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
62348+{
62349+ if (likely(lena != lenb))
62350+ return 0;
62351+
62352+ return !memcmp(a, b, lena);
62353+}
62354+
62355+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
62356+{
62357+ *buflen -= namelen;
62358+ if (*buflen < 0)
62359+ return -ENAMETOOLONG;
62360+ *buffer -= namelen;
62361+ memcpy(*buffer, str, namelen);
62362+ return 0;
62363+}
62364+
62365+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
62366+{
62367+ return prepend(buffer, buflen, name->name, name->len);
62368+}
62369+
62370+static int prepend_path(const struct path *path, struct path *root,
62371+ char **buffer, int *buflen)
62372+{
62373+ struct dentry *dentry = path->dentry;
62374+ struct vfsmount *vfsmnt = path->mnt;
62375+ struct mount *mnt = real_mount(vfsmnt);
62376+ bool slash = false;
62377+ int error = 0;
62378+
62379+ while (dentry != root->dentry || vfsmnt != root->mnt) {
62380+ struct dentry * parent;
62381+
62382+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
62383+ /* Global root? */
62384+ if (!mnt_has_parent(mnt)) {
62385+ goto out;
62386+ }
62387+ dentry = mnt->mnt_mountpoint;
62388+ mnt = mnt->mnt_parent;
62389+ vfsmnt = &mnt->mnt;
62390+ continue;
62391+ }
62392+ parent = dentry->d_parent;
62393+ prefetch(parent);
62394+ spin_lock(&dentry->d_lock);
62395+ error = prepend_name(buffer, buflen, &dentry->d_name);
62396+ spin_unlock(&dentry->d_lock);
62397+ if (!error)
62398+ error = prepend(buffer, buflen, "/", 1);
62399+ if (error)
62400+ break;
62401+
62402+ slash = true;
62403+ dentry = parent;
62404+ }
62405+
62406+out:
62407+ if (!error && !slash)
62408+ error = prepend(buffer, buflen, "/", 1);
62409+
62410+ return error;
62411+}
62412+
62413+/* this must be called with vfsmount_lock and rename_lock held */
62414+
62415+static char *__our_d_path(const struct path *path, struct path *root,
62416+ char *buf, int buflen)
62417+{
62418+ char *res = buf + buflen;
62419+ int error;
62420+
62421+ prepend(&res, &buflen, "\0", 1);
62422+ error = prepend_path(path, root, &res, &buflen);
62423+ if (error)
62424+ return ERR_PTR(error);
62425+
62426+ return res;
62427+}
62428+
62429+static char *
62430+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
62431+{
62432+ char *retval;
62433+
62434+ retval = __our_d_path(path, root, buf, buflen);
62435+ if (unlikely(IS_ERR(retval)))
62436+ retval = strcpy(buf, "<path too long>");
62437+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
62438+ retval[1] = '\0';
62439+
62440+ return retval;
62441+}
62442+
62443+static char *
62444+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
62445+ char *buf, int buflen)
62446+{
62447+ struct path path;
62448+ char *res;
62449+
62450+ path.dentry = (struct dentry *)dentry;
62451+ path.mnt = (struct vfsmount *)vfsmnt;
62452+
62453+ /* we can use real_root.dentry, real_root.mnt, because this is only called
62454+ by the RBAC system */
62455+ res = gen_full_path(&path, &real_root, buf, buflen);
62456+
62457+ return res;
62458+}
62459+
62460+static char *
62461+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
62462+ char *buf, int buflen)
62463+{
62464+ char *res;
62465+ struct path path;
62466+ struct path root;
62467+ struct task_struct *reaper = init_pid_ns.child_reaper;
62468+
62469+ path.dentry = (struct dentry *)dentry;
62470+ path.mnt = (struct vfsmount *)vfsmnt;
62471+
62472+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
62473+ get_fs_root(reaper->fs, &root);
62474+
62475+ br_read_lock(&vfsmount_lock);
62476+ write_seqlock(&rename_lock);
62477+ res = gen_full_path(&path, &root, buf, buflen);
62478+ write_sequnlock(&rename_lock);
62479+ br_read_unlock(&vfsmount_lock);
62480+
62481+ path_put(&root);
62482+ return res;
62483+}
62484+
62485+static char *
62486+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
62487+{
62488+ char *ret;
62489+ br_read_lock(&vfsmount_lock);
62490+ write_seqlock(&rename_lock);
62491+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
62492+ PAGE_SIZE);
62493+ write_sequnlock(&rename_lock);
62494+ br_read_unlock(&vfsmount_lock);
62495+ return ret;
62496+}
62497+
62498+static char *
62499+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
62500+{
62501+ char *ret;
62502+ char *buf;
62503+ int buflen;
62504+
62505+ br_read_lock(&vfsmount_lock);
62506+ write_seqlock(&rename_lock);
62507+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
62508+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
62509+ buflen = (int)(ret - buf);
62510+ if (buflen >= 5)
62511+ prepend(&ret, &buflen, "/proc", 5);
62512+ else
62513+ ret = strcpy(buf, "<path too long>");
62514+ write_sequnlock(&rename_lock);
62515+ br_read_unlock(&vfsmount_lock);
62516+ return ret;
62517+}
62518+
62519+char *
62520+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
62521+{
62522+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
62523+ PAGE_SIZE);
62524+}
62525+
62526+char *
62527+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
62528+{
62529+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
62530+ PAGE_SIZE);
62531+}
62532+
62533+char *
62534+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
62535+{
62536+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
62537+ PAGE_SIZE);
62538+}
62539+
62540+char *
62541+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
62542+{
62543+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
62544+ PAGE_SIZE);
62545+}
62546+
62547+char *
62548+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
62549+{
62550+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
62551+ PAGE_SIZE);
62552+}
62553+
62554+__inline__ __u32
62555+to_gr_audit(const __u32 reqmode)
62556+{
62557+ /* masks off auditable permission flags, then shifts them to create
62558+ auditing flags, and adds the special case of append auditing if
62559+ we're requesting write */
62560+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
62561+}
62562+
62563+struct acl_subject_label *
62564+lookup_subject_map(const struct acl_subject_label *userp)
62565+{
62566+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
62567+ struct subject_map *match;
62568+
62569+ match = subj_map_set.s_hash[index];
62570+
62571+ while (match && match->user != userp)
62572+ match = match->next;
62573+
62574+ if (match != NULL)
62575+ return match->kernel;
62576+ else
62577+ return NULL;
62578+}
62579+
62580+static void
62581+insert_subj_map_entry(struct subject_map *subjmap)
62582+{
62583+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
62584+ struct subject_map **curr;
62585+
62586+ subjmap->prev = NULL;
62587+
62588+ curr = &subj_map_set.s_hash[index];
62589+ if (*curr != NULL)
62590+ (*curr)->prev = subjmap;
62591+
62592+ subjmap->next = *curr;
62593+ *curr = subjmap;
62594+
62595+ return;
62596+}
62597+
62598+static struct acl_role_label *
62599+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
62600+ const gid_t gid)
62601+{
62602+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
62603+ struct acl_role_label *match;
62604+ struct role_allowed_ip *ipp;
62605+ unsigned int x;
62606+ u32 curr_ip = task->signal->curr_ip;
62607+
62608+ task->signal->saved_ip = curr_ip;
62609+
62610+ match = acl_role_set.r_hash[index];
62611+
62612+ while (match) {
62613+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
62614+ for (x = 0; x < match->domain_child_num; x++) {
62615+ if (match->domain_children[x] == uid)
62616+ goto found;
62617+ }
62618+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
62619+ break;
62620+ match = match->next;
62621+ }
62622+found:
62623+ if (match == NULL) {
62624+ try_group:
62625+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
62626+ match = acl_role_set.r_hash[index];
62627+
62628+ while (match) {
62629+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
62630+ for (x = 0; x < match->domain_child_num; x++) {
62631+ if (match->domain_children[x] == gid)
62632+ goto found2;
62633+ }
62634+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
62635+ break;
62636+ match = match->next;
62637+ }
62638+found2:
62639+ if (match == NULL)
62640+ match = default_role;
62641+ if (match->allowed_ips == NULL)
62642+ return match;
62643+ else {
62644+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
62645+ if (likely
62646+ ((ntohl(curr_ip) & ipp->netmask) ==
62647+ (ntohl(ipp->addr) & ipp->netmask)))
62648+ return match;
62649+ }
62650+ match = default_role;
62651+ }
62652+ } else if (match->allowed_ips == NULL) {
62653+ return match;
62654+ } else {
62655+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
62656+ if (likely
62657+ ((ntohl(curr_ip) & ipp->netmask) ==
62658+ (ntohl(ipp->addr) & ipp->netmask)))
62659+ return match;
62660+ }
62661+ goto try_group;
62662+ }
62663+
62664+ return match;
62665+}
62666+
62667+struct acl_subject_label *
62668+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
62669+ const struct acl_role_label *role)
62670+{
62671+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
62672+ struct acl_subject_label *match;
62673+
62674+ match = role->subj_hash[index];
62675+
62676+ while (match && (match->inode != ino || match->device != dev ||
62677+ (match->mode & GR_DELETED))) {
62678+ match = match->next;
62679+ }
62680+
62681+ if (match && !(match->mode & GR_DELETED))
62682+ return match;
62683+ else
62684+ return NULL;
62685+}
62686+
62687+struct acl_subject_label *
62688+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
62689+ const struct acl_role_label *role)
62690+{
62691+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
62692+ struct acl_subject_label *match;
62693+
62694+ match = role->subj_hash[index];
62695+
62696+ while (match && (match->inode != ino || match->device != dev ||
62697+ !(match->mode & GR_DELETED))) {
62698+ match = match->next;
62699+ }
62700+
62701+ if (match && (match->mode & GR_DELETED))
62702+ return match;
62703+ else
62704+ return NULL;
62705+}
62706+
62707+static struct acl_object_label *
62708+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
62709+ const struct acl_subject_label *subj)
62710+{
62711+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
62712+ struct acl_object_label *match;
62713+
62714+ match = subj->obj_hash[index];
62715+
62716+ while (match && (match->inode != ino || match->device != dev ||
62717+ (match->mode & GR_DELETED))) {
62718+ match = match->next;
62719+ }
62720+
62721+ if (match && !(match->mode & GR_DELETED))
62722+ return match;
62723+ else
62724+ return NULL;
62725+}
62726+
62727+static struct acl_object_label *
62728+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
62729+ const struct acl_subject_label *subj)
62730+{
62731+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
62732+ struct acl_object_label *match;
62733+
62734+ match = subj->obj_hash[index];
62735+
62736+ while (match && (match->inode != ino || match->device != dev ||
62737+ !(match->mode & GR_DELETED))) {
62738+ match = match->next;
62739+ }
62740+
62741+ if (match && (match->mode & GR_DELETED))
62742+ return match;
62743+
62744+ match = subj->obj_hash[index];
62745+
62746+ while (match && (match->inode != ino || match->device != dev ||
62747+ (match->mode & GR_DELETED))) {
62748+ match = match->next;
62749+ }
62750+
62751+ if (match && !(match->mode & GR_DELETED))
62752+ return match;
62753+ else
62754+ return NULL;
62755+}
62756+
62757+static struct name_entry *
62758+lookup_name_entry(const char *name)
62759+{
62760+ unsigned int len = strlen(name);
62761+ unsigned int key = full_name_hash(name, len);
62762+ unsigned int index = key % name_set.n_size;
62763+ struct name_entry *match;
62764+
62765+ match = name_set.n_hash[index];
62766+
62767+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
62768+ match = match->next;
62769+
62770+ return match;
62771+}
62772+
62773+static struct name_entry *
62774+lookup_name_entry_create(const char *name)
62775+{
62776+ unsigned int len = strlen(name);
62777+ unsigned int key = full_name_hash(name, len);
62778+ unsigned int index = key % name_set.n_size;
62779+ struct name_entry *match;
62780+
62781+ match = name_set.n_hash[index];
62782+
62783+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
62784+ !match->deleted))
62785+ match = match->next;
62786+
62787+ if (match && match->deleted)
62788+ return match;
62789+
62790+ match = name_set.n_hash[index];
62791+
62792+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
62793+ match->deleted))
62794+ match = match->next;
62795+
62796+ if (match && !match->deleted)
62797+ return match;
62798+ else
62799+ return NULL;
62800+}
62801+
62802+static struct inodev_entry *
62803+lookup_inodev_entry(const ino_t ino, const dev_t dev)
62804+{
62805+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
62806+ struct inodev_entry *match;
62807+
62808+ match = inodev_set.i_hash[index];
62809+
62810+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
62811+ match = match->next;
62812+
62813+ return match;
62814+}
62815+
62816+static void
62817+insert_inodev_entry(struct inodev_entry *entry)
62818+{
62819+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
62820+ inodev_set.i_size);
62821+ struct inodev_entry **curr;
62822+
62823+ entry->prev = NULL;
62824+
62825+ curr = &inodev_set.i_hash[index];
62826+ if (*curr != NULL)
62827+ (*curr)->prev = entry;
62828+
62829+ entry->next = *curr;
62830+ *curr = entry;
62831+
62832+ return;
62833+}
62834+
62835+static void
62836+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
62837+{
62838+ unsigned int index =
62839+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
62840+ struct acl_role_label **curr;
62841+ struct acl_role_label *tmp, *tmp2;
62842+
62843+ curr = &acl_role_set.r_hash[index];
62844+
62845+ /* simple case, slot is empty, just set it to our role */
62846+ if (*curr == NULL) {
62847+ *curr = role;
62848+ } else {
62849+ /* example:
62850+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
62851+ 2 -> 3
62852+ */
62853+ /* first check to see if we can already be reached via this slot */
62854+ tmp = *curr;
62855+ while (tmp && tmp != role)
62856+ tmp = tmp->next;
62857+ if (tmp == role) {
62858+ /* we don't need to add ourselves to this slot's chain */
62859+ return;
62860+ }
62861+ /* we need to add ourselves to this chain, two cases */
62862+ if (role->next == NULL) {
62863+ /* simple case, append the current chain to our role */
62864+ role->next = *curr;
62865+ *curr = role;
62866+ } else {
62867+ /* 1 -> 2 -> 3 -> 4
62868+ 2 -> 3 -> 4
62869+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
62870+ */
62871+ /* trickier case: walk our role's chain until we find
62872+ the role for the start of the current slot's chain */
62873+ tmp = role;
62874+ tmp2 = *curr;
62875+ while (tmp->next && tmp->next != tmp2)
62876+ tmp = tmp->next;
62877+ if (tmp->next == tmp2) {
62878+ /* from example above, we found 3, so just
62879+ replace this slot's chain with ours */
62880+ *curr = role;
62881+ } else {
62882+ /* we didn't find a subset of our role's chain
62883+ in the current slot's chain, so append their
62884+ chain to ours, and set us as the first role in
62885+ the slot's chain
62886+
62887+ we could fold this case with the case above,
62888+ but making it explicit for clarity
62889+ */
62890+ tmp->next = tmp2;
62891+ *curr = role;
62892+ }
62893+ }
62894+ }
62895+
62896+ return;
62897+}
62898+
62899+static void
62900+insert_acl_role_label(struct acl_role_label *role)
62901+{
62902+ int i;
62903+
62904+ if (role_list == NULL) {
62905+ role_list = role;
62906+ role->prev = NULL;
62907+ } else {
62908+ role->prev = role_list;
62909+ role_list = role;
62910+ }
62911+
62912+ /* used for hash chains */
62913+ role->next = NULL;
62914+
62915+ if (role->roletype & GR_ROLE_DOMAIN) {
62916+ for (i = 0; i < role->domain_child_num; i++)
62917+ __insert_acl_role_label(role, role->domain_children[i]);
62918+ } else
62919+ __insert_acl_role_label(role, role->uidgid);
62920+}
62921+
62922+static int
62923+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
62924+{
62925+ struct name_entry **curr, *nentry;
62926+ struct inodev_entry *ientry;
62927+ unsigned int len = strlen(name);
62928+ unsigned int key = full_name_hash(name, len);
62929+ unsigned int index = key % name_set.n_size;
62930+
62931+ curr = &name_set.n_hash[index];
62932+
62933+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
62934+ curr = &((*curr)->next);
62935+
62936+ if (*curr != NULL)
62937+ return 1;
62938+
62939+ nentry = acl_alloc(sizeof (struct name_entry));
62940+ if (nentry == NULL)
62941+ return 0;
62942+ ientry = acl_alloc(sizeof (struct inodev_entry));
62943+ if (ientry == NULL)
62944+ return 0;
62945+ ientry->nentry = nentry;
62946+
62947+ nentry->key = key;
62948+ nentry->name = name;
62949+ nentry->inode = inode;
62950+ nentry->device = device;
62951+ nentry->len = len;
62952+ nentry->deleted = deleted;
62953+
62954+ nentry->prev = NULL;
62955+ curr = &name_set.n_hash[index];
62956+ if (*curr != NULL)
62957+ (*curr)->prev = nentry;
62958+ nentry->next = *curr;
62959+ *curr = nentry;
62960+
62961+ /* insert us into the table searchable by inode/dev */
62962+ insert_inodev_entry(ientry);
62963+
62964+ return 1;
62965+}
62966+
62967+static void
62968+insert_acl_obj_label(struct acl_object_label *obj,
62969+ struct acl_subject_label *subj)
62970+{
62971+ unsigned int index =
62972+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
62973+ struct acl_object_label **curr;
62974+
62975+
62976+ obj->prev = NULL;
62977+
62978+ curr = &subj->obj_hash[index];
62979+ if (*curr != NULL)
62980+ (*curr)->prev = obj;
62981+
62982+ obj->next = *curr;
62983+ *curr = obj;
62984+
62985+ return;
62986+}
62987+
62988+static void
62989+insert_acl_subj_label(struct acl_subject_label *obj,
62990+ struct acl_role_label *role)
62991+{
62992+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
62993+ struct acl_subject_label **curr;
62994+
62995+ obj->prev = NULL;
62996+
62997+ curr = &role->subj_hash[index];
62998+ if (*curr != NULL)
62999+ (*curr)->prev = obj;
63000+
63001+ obj->next = *curr;
63002+ *curr = obj;
63003+
63004+ return;
63005+}
63006+
63007+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
63008+
63009+static void *
63010+create_table(__u32 * len, int elementsize)
63011+{
63012+ unsigned int table_sizes[] = {
63013+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
63014+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
63015+ 4194301, 8388593, 16777213, 33554393, 67108859
63016+ };
63017+ void *newtable = NULL;
63018+ unsigned int pwr = 0;
63019+
63020+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
63021+ table_sizes[pwr] <= *len)
63022+ pwr++;
63023+
63024+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
63025+ return newtable;
63026+
63027+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
63028+ newtable =
63029+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
63030+ else
63031+ newtable = vmalloc(table_sizes[pwr] * elementsize);
63032+
63033+ *len = table_sizes[pwr];
63034+
63035+ return newtable;
63036+}
63037+
63038+static int
63039+init_variables(const struct gr_arg *arg)
63040+{
63041+ struct task_struct *reaper = init_pid_ns.child_reaper;
63042+ unsigned int stacksize;
63043+
63044+ subj_map_set.s_size = arg->role_db.num_subjects;
63045+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
63046+ name_set.n_size = arg->role_db.num_objects;
63047+ inodev_set.i_size = arg->role_db.num_objects;
63048+
63049+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
63050+ !name_set.n_size || !inodev_set.i_size)
63051+ return 1;
63052+
63053+ if (!gr_init_uidset())
63054+ return 1;
63055+
63056+ /* set up the stack that holds allocation info */
63057+
63058+ stacksize = arg->role_db.num_pointers + 5;
63059+
63060+ if (!acl_alloc_stack_init(stacksize))
63061+ return 1;
63062+
63063+ /* grab reference for the real root dentry and vfsmount */
63064+ get_fs_root(reaper->fs, &real_root);
63065+
63066+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63067+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
63068+#endif
63069+
63070+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
63071+ if (fakefs_obj_rw == NULL)
63072+ return 1;
63073+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
63074+
63075+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
63076+ if (fakefs_obj_rwx == NULL)
63077+ return 1;
63078+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
63079+
63080+ subj_map_set.s_hash =
63081+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
63082+ acl_role_set.r_hash =
63083+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
63084+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
63085+ inodev_set.i_hash =
63086+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
63087+
63088+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
63089+ !name_set.n_hash || !inodev_set.i_hash)
63090+ return 1;
63091+
63092+ memset(subj_map_set.s_hash, 0,
63093+ sizeof(struct subject_map *) * subj_map_set.s_size);
63094+ memset(acl_role_set.r_hash, 0,
63095+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
63096+ memset(name_set.n_hash, 0,
63097+ sizeof (struct name_entry *) * name_set.n_size);
63098+ memset(inodev_set.i_hash, 0,
63099+ sizeof (struct inodev_entry *) * inodev_set.i_size);
63100+
63101+ return 0;
63102+}
63103+
63104+/* free information not needed after startup
63105+ currently contains user->kernel pointer mappings for subjects
63106+*/
63107+
63108+static void
63109+free_init_variables(void)
63110+{
63111+ __u32 i;
63112+
63113+ if (subj_map_set.s_hash) {
63114+ for (i = 0; i < subj_map_set.s_size; i++) {
63115+ if (subj_map_set.s_hash[i]) {
63116+ kfree(subj_map_set.s_hash[i]);
63117+ subj_map_set.s_hash[i] = NULL;
63118+ }
63119+ }
63120+
63121+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
63122+ PAGE_SIZE)
63123+ kfree(subj_map_set.s_hash);
63124+ else
63125+ vfree(subj_map_set.s_hash);
63126+ }
63127+
63128+ return;
63129+}
63130+
63131+static void
63132+free_variables(void)
63133+{
63134+ struct acl_subject_label *s;
63135+ struct acl_role_label *r;
63136+ struct task_struct *task, *task2;
63137+ unsigned int x;
63138+
63139+ gr_clear_learn_entries();
63140+
63141+ read_lock(&tasklist_lock);
63142+ do_each_thread(task2, task) {
63143+ task->acl_sp_role = 0;
63144+ task->acl_role_id = 0;
63145+ task->acl = NULL;
63146+ task->role = NULL;
63147+ } while_each_thread(task2, task);
63148+ read_unlock(&tasklist_lock);
63149+
63150+ /* release the reference to the real root dentry and vfsmount */
63151+ path_put(&real_root);
63152+ memset(&real_root, 0, sizeof(real_root));
63153+
63154+ /* free all object hash tables */
63155+
63156+ FOR_EACH_ROLE_START(r)
63157+ if (r->subj_hash == NULL)
63158+ goto next_role;
63159+ FOR_EACH_SUBJECT_START(r, s, x)
63160+ if (s->obj_hash == NULL)
63161+ break;
63162+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
63163+ kfree(s->obj_hash);
63164+ else
63165+ vfree(s->obj_hash);
63166+ FOR_EACH_SUBJECT_END(s, x)
63167+ FOR_EACH_NESTED_SUBJECT_START(r, s)
63168+ if (s->obj_hash == NULL)
63169+ break;
63170+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
63171+ kfree(s->obj_hash);
63172+ else
63173+ vfree(s->obj_hash);
63174+ FOR_EACH_NESTED_SUBJECT_END(s)
63175+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
63176+ kfree(r->subj_hash);
63177+ else
63178+ vfree(r->subj_hash);
63179+ r->subj_hash = NULL;
63180+next_role:
63181+ FOR_EACH_ROLE_END(r)
63182+
63183+ acl_free_all();
63184+
63185+ if (acl_role_set.r_hash) {
63186+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
63187+ PAGE_SIZE)
63188+ kfree(acl_role_set.r_hash);
63189+ else
63190+ vfree(acl_role_set.r_hash);
63191+ }
63192+ if (name_set.n_hash) {
63193+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
63194+ PAGE_SIZE)
63195+ kfree(name_set.n_hash);
63196+ else
63197+ vfree(name_set.n_hash);
63198+ }
63199+
63200+ if (inodev_set.i_hash) {
63201+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
63202+ PAGE_SIZE)
63203+ kfree(inodev_set.i_hash);
63204+ else
63205+ vfree(inodev_set.i_hash);
63206+ }
63207+
63208+ gr_free_uidset();
63209+
63210+ memset(&name_set, 0, sizeof (struct name_db));
63211+ memset(&inodev_set, 0, sizeof (struct inodev_db));
63212+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
63213+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
63214+
63215+ default_role = NULL;
63216+ kernel_role = NULL;
63217+ role_list = NULL;
63218+
63219+ return;
63220+}
63221+
63222+static struct acl_subject_label *
63223+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
63224+
63225+static int alloc_and_copy_string(char **name, unsigned int maxlen)
63226+{
63227+ unsigned int len = strnlen_user(*name, maxlen);
63228+ char *tmp;
63229+
63230+ if (!len || len >= maxlen)
63231+ return -EINVAL;
63232+
63233+ if ((tmp = (char *) acl_alloc(len)) == NULL)
63234+ return -ENOMEM;
63235+
63236+ if (copy_from_user(tmp, *name, len))
63237+ return -EFAULT;
63238+
63239+ tmp[len-1] = '\0';
63240+ *name = tmp;
63241+
63242+ return 0;
63243+}
63244+
63245+static int
63246+copy_user_glob(struct acl_object_label *obj)
63247+{
63248+ struct acl_object_label *g_tmp, **guser;
63249+ int error;
63250+
63251+ if (obj->globbed == NULL)
63252+ return 0;
63253+
63254+ guser = &obj->globbed;
63255+ while (*guser) {
63256+ g_tmp = (struct acl_object_label *)
63257+ acl_alloc(sizeof (struct acl_object_label));
63258+ if (g_tmp == NULL)
63259+ return -ENOMEM;
63260+
63261+ if (copy_acl_object_label(g_tmp, *guser))
63262+ return -EFAULT;
63263+
63264+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
63265+ if (error)
63266+ return error;
63267+
63268+ *guser = g_tmp;
63269+ guser = &(g_tmp->next);
63270+ }
63271+
63272+ return 0;
63273+}
63274+
63275+static int
63276+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
63277+ struct acl_role_label *role)
63278+{
63279+ struct acl_object_label *o_tmp;
63280+ int ret;
63281+
63282+ while (userp) {
63283+ if ((o_tmp = (struct acl_object_label *)
63284+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
63285+ return -ENOMEM;
63286+
63287+ if (copy_acl_object_label(o_tmp, userp))
63288+ return -EFAULT;
63289+
63290+ userp = o_tmp->prev;
63291+
63292+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
63293+ if (ret)
63294+ return ret;
63295+
63296+ insert_acl_obj_label(o_tmp, subj);
63297+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
63298+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
63299+ return -ENOMEM;
63300+
63301+ ret = copy_user_glob(o_tmp);
63302+ if (ret)
63303+ return ret;
63304+
63305+ if (o_tmp->nested) {
63306+ int already_copied;
63307+
63308+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
63309+ if (IS_ERR(o_tmp->nested))
63310+ return PTR_ERR(o_tmp->nested);
63311+
63312+ /* insert into nested subject list if we haven't copied this one yet
63313+ to prevent duplicate entries */
63314+ if (!already_copied) {
63315+ o_tmp->nested->next = role->hash->first;
63316+ role->hash->first = o_tmp->nested;
63317+ }
63318+ }
63319+ }
63320+
63321+ return 0;
63322+}
63323+
63324+static __u32
63325+count_user_subjs(struct acl_subject_label *userp)
63326+{
63327+ struct acl_subject_label s_tmp;
63328+ __u32 num = 0;
63329+
63330+ while (userp) {
63331+ if (copy_acl_subject_label(&s_tmp, userp))
63332+ break;
63333+
63334+ userp = s_tmp.prev;
63335+ }
63336+
63337+ return num;
63338+}
63339+
63340+static int
63341+copy_user_allowedips(struct acl_role_label *rolep)
63342+{
63343+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
63344+
63345+ ruserip = rolep->allowed_ips;
63346+
63347+ while (ruserip) {
63348+ rlast = rtmp;
63349+
63350+ if ((rtmp = (struct role_allowed_ip *)
63351+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
63352+ return -ENOMEM;
63353+
63354+ if (copy_role_allowed_ip(rtmp, ruserip))
63355+ return -EFAULT;
63356+
63357+ ruserip = rtmp->prev;
63358+
63359+ if (!rlast) {
63360+ rtmp->prev = NULL;
63361+ rolep->allowed_ips = rtmp;
63362+ } else {
63363+ rlast->next = rtmp;
63364+ rtmp->prev = rlast;
63365+ }
63366+
63367+ if (!ruserip)
63368+ rtmp->next = NULL;
63369+ }
63370+
63371+ return 0;
63372+}
63373+
63374+static int
63375+copy_user_transitions(struct acl_role_label *rolep)
63376+{
63377+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
63378+ int error;
63379+
63380+ rusertp = rolep->transitions;
63381+
63382+ while (rusertp) {
63383+ rlast = rtmp;
63384+
63385+ if ((rtmp = (struct role_transition *)
63386+ acl_alloc(sizeof (struct role_transition))) == NULL)
63387+ return -ENOMEM;
63388+
63389+ if (copy_role_transition(rtmp, rusertp))
63390+ return -EFAULT;
63391+
63392+ rusertp = rtmp->prev;
63393+
63394+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
63395+ if (error)
63396+ return error;
63397+
63398+ if (!rlast) {
63399+ rtmp->prev = NULL;
63400+ rolep->transitions = rtmp;
63401+ } else {
63402+ rlast->next = rtmp;
63403+ rtmp->prev = rlast;
63404+ }
63405+
63406+ if (!rusertp)
63407+ rtmp->next = NULL;
63408+ }
63409+
63410+ return 0;
63411+}
63412+
63413+static __u32 count_user_objs(const struct acl_object_label __user *userp)
63414+{
63415+ struct acl_object_label o_tmp;
63416+ __u32 num = 0;
63417+
63418+ while (userp) {
63419+ if (copy_acl_object_label(&o_tmp, userp))
63420+ break;
63421+
63422+ userp = o_tmp.prev;
63423+ num++;
63424+ }
63425+
63426+ return num;
63427+}
63428+
63429+static struct acl_subject_label *
63430+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
63431+{
63432+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
63433+ __u32 num_objs;
63434+ struct acl_ip_label **i_tmp, *i_utmp2;
63435+ struct gr_hash_struct ghash;
63436+ struct subject_map *subjmap;
63437+ unsigned int i_num;
63438+ int err;
63439+
63440+ if (already_copied != NULL)
63441+ *already_copied = 0;
63442+
63443+ s_tmp = lookup_subject_map(userp);
63444+
63445+ /* we've already copied this subject into the kernel, just return
63446+ the reference to it, and don't copy it over again
63447+ */
63448+ if (s_tmp) {
63449+ if (already_copied != NULL)
63450+ *already_copied = 1;
63451+ return(s_tmp);
63452+ }
63453+
63454+ if ((s_tmp = (struct acl_subject_label *)
63455+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
63456+ return ERR_PTR(-ENOMEM);
63457+
63458+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
63459+ if (subjmap == NULL)
63460+ return ERR_PTR(-ENOMEM);
63461+
63462+ subjmap->user = userp;
63463+ subjmap->kernel = s_tmp;
63464+ insert_subj_map_entry(subjmap);
63465+
63466+ if (copy_acl_subject_label(s_tmp, userp))
63467+ return ERR_PTR(-EFAULT);
63468+
63469+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
63470+ if (err)
63471+ return ERR_PTR(err);
63472+
63473+ if (!strcmp(s_tmp->filename, "/"))
63474+ role->root_label = s_tmp;
63475+
63476+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
63477+ return ERR_PTR(-EFAULT);
63478+
63479+ /* copy user and group transition tables */
63480+
63481+ if (s_tmp->user_trans_num) {
63482+ uid_t *uidlist;
63483+
63484+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
63485+ if (uidlist == NULL)
63486+ return ERR_PTR(-ENOMEM);
63487+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
63488+ return ERR_PTR(-EFAULT);
63489+
63490+ s_tmp->user_transitions = uidlist;
63491+ }
63492+
63493+ if (s_tmp->group_trans_num) {
63494+ gid_t *gidlist;
63495+
63496+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
63497+ if (gidlist == NULL)
63498+ return ERR_PTR(-ENOMEM);
63499+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
63500+ return ERR_PTR(-EFAULT);
63501+
63502+ s_tmp->group_transitions = gidlist;
63503+ }
63504+
63505+ /* set up object hash table */
63506+ num_objs = count_user_objs(ghash.first);
63507+
63508+ s_tmp->obj_hash_size = num_objs;
63509+ s_tmp->obj_hash =
63510+ (struct acl_object_label **)
63511+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
63512+
63513+ if (!s_tmp->obj_hash)
63514+ return ERR_PTR(-ENOMEM);
63515+
63516+ memset(s_tmp->obj_hash, 0,
63517+ s_tmp->obj_hash_size *
63518+ sizeof (struct acl_object_label *));
63519+
63520+ /* add in objects */
63521+ err = copy_user_objs(ghash.first, s_tmp, role);
63522+
63523+ if (err)
63524+ return ERR_PTR(err);
63525+
63526+ /* set pointer for parent subject */
63527+ if (s_tmp->parent_subject) {
63528+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
63529+
63530+ if (IS_ERR(s_tmp2))
63531+ return s_tmp2;
63532+
63533+ s_tmp->parent_subject = s_tmp2;
63534+ }
63535+
63536+ /* add in ip acls */
63537+
63538+ if (!s_tmp->ip_num) {
63539+ s_tmp->ips = NULL;
63540+ goto insert;
63541+ }
63542+
63543+ i_tmp =
63544+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
63545+ sizeof (struct acl_ip_label *));
63546+
63547+ if (!i_tmp)
63548+ return ERR_PTR(-ENOMEM);
63549+
63550+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
63551+ *(i_tmp + i_num) =
63552+ (struct acl_ip_label *)
63553+ acl_alloc(sizeof (struct acl_ip_label));
63554+ if (!*(i_tmp + i_num))
63555+ return ERR_PTR(-ENOMEM);
63556+
63557+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
63558+ return ERR_PTR(-EFAULT);
63559+
63560+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
63561+ return ERR_PTR(-EFAULT);
63562+
63563+ if ((*(i_tmp + i_num))->iface == NULL)
63564+ continue;
63565+
63566+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
63567+ if (err)
63568+ return ERR_PTR(err);
63569+ }
63570+
63571+ s_tmp->ips = i_tmp;
63572+
63573+insert:
63574+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
63575+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
63576+ return ERR_PTR(-ENOMEM);
63577+
63578+ return s_tmp;
63579+}
63580+
63581+static int
63582+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
63583+{
63584+ struct acl_subject_label s_pre;
63585+ struct acl_subject_label * ret;
63586+ int err;
63587+
63588+ while (userp) {
63589+ if (copy_acl_subject_label(&s_pre, userp))
63590+ return -EFAULT;
63591+
63592+ ret = do_copy_user_subj(userp, role, NULL);
63593+
63594+ err = PTR_ERR(ret);
63595+ if (IS_ERR(ret))
63596+ return err;
63597+
63598+ insert_acl_subj_label(ret, role);
63599+
63600+ userp = s_pre.prev;
63601+ }
63602+
63603+ return 0;
63604+}
63605+
63606+static int
63607+copy_user_acl(struct gr_arg *arg)
63608+{
63609+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
63610+ struct acl_subject_label *subj_list;
63611+ struct sprole_pw *sptmp;
63612+ struct gr_hash_struct *ghash;
63613+ uid_t *domainlist;
63614+ unsigned int r_num;
63615+ int err = 0;
63616+ __u16 i;
63617+ __u32 num_subjs;
63618+
63619+ /* we need a default and kernel role */
63620+ if (arg->role_db.num_roles < 2)
63621+ return -EINVAL;
63622+
63623+ /* copy special role authentication info from userspace */
63624+
63625+ num_sprole_pws = arg->num_sprole_pws;
63626+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
63627+
63628+ if (!acl_special_roles && num_sprole_pws)
63629+ return -ENOMEM;
63630+
63631+ for (i = 0; i < num_sprole_pws; i++) {
63632+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
63633+ if (!sptmp)
63634+ return -ENOMEM;
63635+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
63636+ return -EFAULT;
63637+
63638+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
63639+ if (err)
63640+ return err;
63641+
63642+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63643+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
63644+#endif
63645+
63646+ acl_special_roles[i] = sptmp;
63647+ }
63648+
63649+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
63650+
63651+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
63652+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
63653+
63654+ if (!r_tmp)
63655+ return -ENOMEM;
63656+
63657+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
63658+ return -EFAULT;
63659+
63660+ if (copy_acl_role_label(r_tmp, r_utmp2))
63661+ return -EFAULT;
63662+
63663+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
63664+ if (err)
63665+ return err;
63666+
63667+ if (!strcmp(r_tmp->rolename, "default")
63668+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
63669+ default_role = r_tmp;
63670+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
63671+ kernel_role = r_tmp;
63672+ }
63673+
63674+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
63675+ return -ENOMEM;
63676+
63677+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
63678+ return -EFAULT;
63679+
63680+ r_tmp->hash = ghash;
63681+
63682+ num_subjs = count_user_subjs(r_tmp->hash->first);
63683+
63684+ r_tmp->subj_hash_size = num_subjs;
63685+ r_tmp->subj_hash =
63686+ (struct acl_subject_label **)
63687+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
63688+
63689+ if (!r_tmp->subj_hash)
63690+ return -ENOMEM;
63691+
63692+ err = copy_user_allowedips(r_tmp);
63693+ if (err)
63694+ return err;
63695+
63696+ /* copy domain info */
63697+ if (r_tmp->domain_children != NULL) {
63698+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
63699+ if (domainlist == NULL)
63700+ return -ENOMEM;
63701+
63702+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
63703+ return -EFAULT;
63704+
63705+ r_tmp->domain_children = domainlist;
63706+ }
63707+
63708+ err = copy_user_transitions(r_tmp);
63709+ if (err)
63710+ return err;
63711+
63712+ memset(r_tmp->subj_hash, 0,
63713+ r_tmp->subj_hash_size *
63714+ sizeof (struct acl_subject_label *));
63715+
63716+ /* acquire the list of subjects, then NULL out
63717+ the list prior to parsing the subjects for this role,
63718+ as during this parsing the list is replaced with a list
63719+ of *nested* subjects for the role
63720+ */
63721+ subj_list = r_tmp->hash->first;
63722+
63723+ /* set nested subject list to null */
63724+ r_tmp->hash->first = NULL;
63725+
63726+ err = copy_user_subjs(subj_list, r_tmp);
63727+
63728+ if (err)
63729+ return err;
63730+
63731+ insert_acl_role_label(r_tmp);
63732+ }
63733+
63734+ if (default_role == NULL || kernel_role == NULL)
63735+ return -EINVAL;
63736+
63737+ return err;
63738+}
63739+
63740+static int
63741+gracl_init(struct gr_arg *args)
63742+{
63743+ int error = 0;
63744+
63745+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
63746+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
63747+
63748+ if (init_variables(args)) {
63749+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
63750+ error = -ENOMEM;
63751+ free_variables();
63752+ goto out;
63753+ }
63754+
63755+ error = copy_user_acl(args);
63756+ free_init_variables();
63757+ if (error) {
63758+ free_variables();
63759+ goto out;
63760+ }
63761+
63762+ if ((error = gr_set_acls(0))) {
63763+ free_variables();
63764+ goto out;
63765+ }
63766+
63767+ pax_open_kernel();
63768+ gr_status |= GR_READY;
63769+ pax_close_kernel();
63770+
63771+ out:
63772+ return error;
63773+}
63774+
63775+/* derived from glibc fnmatch() 0: match, 1: no match*/
63776+
63777+static int
63778+glob_match(const char *p, const char *n)
63779+{
63780+ char c;
63781+
63782+ while ((c = *p++) != '\0') {
63783+ switch (c) {
63784+ case '?':
63785+ if (*n == '\0')
63786+ return 1;
63787+ else if (*n == '/')
63788+ return 1;
63789+ break;
63790+ case '\\':
63791+ if (*n != c)
63792+ return 1;
63793+ break;
63794+ case '*':
63795+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
63796+ if (*n == '/')
63797+ return 1;
63798+ else if (c == '?') {
63799+ if (*n == '\0')
63800+ return 1;
63801+ else
63802+ ++n;
63803+ }
63804+ }
63805+ if (c == '\0') {
63806+ return 0;
63807+ } else {
63808+ const char *endp;
63809+
63810+ if ((endp = strchr(n, '/')) == NULL)
63811+ endp = n + strlen(n);
63812+
63813+ if (c == '[') {
63814+ for (--p; n < endp; ++n)
63815+ if (!glob_match(p, n))
63816+ return 0;
63817+ } else if (c == '/') {
63818+ while (*n != '\0' && *n != '/')
63819+ ++n;
63820+ if (*n == '/' && !glob_match(p, n + 1))
63821+ return 0;
63822+ } else {
63823+ for (--p; n < endp; ++n)
63824+ if (*n == c && !glob_match(p, n))
63825+ return 0;
63826+ }
63827+
63828+ return 1;
63829+ }
63830+ case '[':
63831+ {
63832+ int not;
63833+ char cold;
63834+
63835+ if (*n == '\0' || *n == '/')
63836+ return 1;
63837+
63838+ not = (*p == '!' || *p == '^');
63839+ if (not)
63840+ ++p;
63841+
63842+ c = *p++;
63843+ for (;;) {
63844+ unsigned char fn = (unsigned char)*n;
63845+
63846+ if (c == '\0')
63847+ return 1;
63848+ else {
63849+ if (c == fn)
63850+ goto matched;
63851+ cold = c;
63852+ c = *p++;
63853+
63854+ if (c == '-' && *p != ']') {
63855+ unsigned char cend = *p++;
63856+
63857+ if (cend == '\0')
63858+ return 1;
63859+
63860+ if (cold <= fn && fn <= cend)
63861+ goto matched;
63862+
63863+ c = *p++;
63864+ }
63865+ }
63866+
63867+ if (c == ']')
63868+ break;
63869+ }
63870+ if (!not)
63871+ return 1;
63872+ break;
63873+ matched:
63874+ while (c != ']') {
63875+ if (c == '\0')
63876+ return 1;
63877+
63878+ c = *p++;
63879+ }
63880+ if (not)
63881+ return 1;
63882+ }
63883+ break;
63884+ default:
63885+ if (c != *n)
63886+ return 1;
63887+ }
63888+
63889+ ++n;
63890+ }
63891+
63892+ if (*n == '\0')
63893+ return 0;
63894+
63895+ if (*n == '/')
63896+ return 0;
63897+
63898+ return 1;
63899+}
63900+
63901+static struct acl_object_label *
63902+chk_glob_label(struct acl_object_label *globbed,
63903+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
63904+{
63905+ struct acl_object_label *tmp;
63906+
63907+ if (*path == NULL)
63908+ *path = gr_to_filename_nolock(dentry, mnt);
63909+
63910+ tmp = globbed;
63911+
63912+ while (tmp) {
63913+ if (!glob_match(tmp->filename, *path))
63914+ return tmp;
63915+ tmp = tmp->next;
63916+ }
63917+
63918+ return NULL;
63919+}
63920+
63921+static struct acl_object_label *
63922+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
63923+ const ino_t curr_ino, const dev_t curr_dev,
63924+ const struct acl_subject_label *subj, char **path, const int checkglob)
63925+{
63926+ struct acl_subject_label *tmpsubj;
63927+ struct acl_object_label *retval;
63928+ struct acl_object_label *retval2;
63929+
63930+ tmpsubj = (struct acl_subject_label *) subj;
63931+ read_lock(&gr_inode_lock);
63932+ do {
63933+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
63934+ if (retval) {
63935+ if (checkglob && retval->globbed) {
63936+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
63937+ if (retval2)
63938+ retval = retval2;
63939+ }
63940+ break;
63941+ }
63942+ } while ((tmpsubj = tmpsubj->parent_subject));
63943+ read_unlock(&gr_inode_lock);
63944+
63945+ return retval;
63946+}
63947+
63948+static __inline__ struct acl_object_label *
63949+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
63950+ struct dentry *curr_dentry,
63951+ const struct acl_subject_label *subj, char **path, const int checkglob)
63952+{
63953+ int newglob = checkglob;
63954+ ino_t inode;
63955+ dev_t device;
63956+
63957+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
63958+ as we don't want a / * rule to match instead of the / object
63959+ don't do this for create lookups that call this function though, since they're looking up
63960+ on the parent and thus need globbing checks on all paths
63961+ */
63962+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
63963+ newglob = GR_NO_GLOB;
63964+
63965+ spin_lock(&curr_dentry->d_lock);
63966+ inode = curr_dentry->d_inode->i_ino;
63967+ device = __get_dev(curr_dentry);
63968+ spin_unlock(&curr_dentry->d_lock);
63969+
63970+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
63971+}
63972+
63973+#ifdef CONFIG_HUGETLBFS
63974+static inline bool
63975+is_hugetlbfs_mnt(const struct vfsmount *mnt)
63976+{
63977+ int i;
63978+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
63979+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
63980+ return true;
63981+ }
63982+
63983+ return false;
63984+}
63985+#endif
63986+
63987+static struct acl_object_label *
63988+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
63989+ const struct acl_subject_label *subj, char *path, const int checkglob)
63990+{
63991+ struct dentry *dentry = (struct dentry *) l_dentry;
63992+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
63993+ struct mount *real_mnt = real_mount(mnt);
63994+ struct acl_object_label *retval;
63995+ struct dentry *parent;
63996+
63997+ br_read_lock(&vfsmount_lock);
63998+ write_seqlock(&rename_lock);
63999+
64000+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
64001+#ifdef CONFIG_NET
64002+ mnt == sock_mnt ||
64003+#endif
64004+#ifdef CONFIG_HUGETLBFS
64005+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
64006+#endif
64007+ /* ignore Eric Biederman */
64008+ IS_PRIVATE(l_dentry->d_inode))) {
64009+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
64010+ goto out;
64011+ }
64012+
64013+ for (;;) {
64014+ if (dentry == real_root.dentry && mnt == real_root.mnt)
64015+ break;
64016+
64017+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64018+ if (!mnt_has_parent(real_mnt))
64019+ break;
64020+
64021+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64022+ if (retval != NULL)
64023+ goto out;
64024+
64025+ dentry = real_mnt->mnt_mountpoint;
64026+ real_mnt = real_mnt->mnt_parent;
64027+ mnt = &real_mnt->mnt;
64028+ continue;
64029+ }
64030+
64031+ parent = dentry->d_parent;
64032+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64033+ if (retval != NULL)
64034+ goto out;
64035+
64036+ dentry = parent;
64037+ }
64038+
64039+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64040+
64041+ /* real_root is pinned so we don't have to hold a reference */
64042+ if (retval == NULL)
64043+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
64044+out:
64045+ write_sequnlock(&rename_lock);
64046+ br_read_unlock(&vfsmount_lock);
64047+
64048+ BUG_ON(retval == NULL);
64049+
64050+ return retval;
64051+}
64052+
64053+static __inline__ struct acl_object_label *
64054+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64055+ const struct acl_subject_label *subj)
64056+{
64057+ char *path = NULL;
64058+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
64059+}
64060+
64061+static __inline__ struct acl_object_label *
64062+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64063+ const struct acl_subject_label *subj)
64064+{
64065+ char *path = NULL;
64066+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
64067+}
64068+
64069+static __inline__ struct acl_object_label *
64070+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64071+ const struct acl_subject_label *subj, char *path)
64072+{
64073+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
64074+}
64075+
64076+static struct acl_subject_label *
64077+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64078+ const struct acl_role_label *role)
64079+{
64080+ struct dentry *dentry = (struct dentry *) l_dentry;
64081+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64082+ struct mount *real_mnt = real_mount(mnt);
64083+ struct acl_subject_label *retval;
64084+ struct dentry *parent;
64085+
64086+ br_read_lock(&vfsmount_lock);
64087+ write_seqlock(&rename_lock);
64088+
64089+ for (;;) {
64090+ if (dentry == real_root.dentry && mnt == real_root.mnt)
64091+ break;
64092+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64093+ if (!mnt_has_parent(real_mnt))
64094+ break;
64095+
64096+ spin_lock(&dentry->d_lock);
64097+ read_lock(&gr_inode_lock);
64098+ retval =
64099+ lookup_acl_subj_label(dentry->d_inode->i_ino,
64100+ __get_dev(dentry), role);
64101+ read_unlock(&gr_inode_lock);
64102+ spin_unlock(&dentry->d_lock);
64103+ if (retval != NULL)
64104+ goto out;
64105+
64106+ dentry = real_mnt->mnt_mountpoint;
64107+ real_mnt = real_mnt->mnt_parent;
64108+ mnt = &real_mnt->mnt;
64109+ continue;
64110+ }
64111+
64112+ spin_lock(&dentry->d_lock);
64113+ read_lock(&gr_inode_lock);
64114+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64115+ __get_dev(dentry), role);
64116+ read_unlock(&gr_inode_lock);
64117+ parent = dentry->d_parent;
64118+ spin_unlock(&dentry->d_lock);
64119+
64120+ if (retval != NULL)
64121+ goto out;
64122+
64123+ dentry = parent;
64124+ }
64125+
64126+ spin_lock(&dentry->d_lock);
64127+ read_lock(&gr_inode_lock);
64128+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64129+ __get_dev(dentry), role);
64130+ read_unlock(&gr_inode_lock);
64131+ spin_unlock(&dentry->d_lock);
64132+
64133+ if (unlikely(retval == NULL)) {
64134+ /* real_root is pinned, we don't need to hold a reference */
64135+ read_lock(&gr_inode_lock);
64136+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
64137+ __get_dev(real_root.dentry), role);
64138+ read_unlock(&gr_inode_lock);
64139+ }
64140+out:
64141+ write_sequnlock(&rename_lock);
64142+ br_read_unlock(&vfsmount_lock);
64143+
64144+ BUG_ON(retval == NULL);
64145+
64146+ return retval;
64147+}
64148+
64149+static void
64150+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
64151+{
64152+ struct task_struct *task = current;
64153+ const struct cred *cred = current_cred();
64154+
64155+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
64156+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64157+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64158+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
64159+
64160+ return;
64161+}
64162+
64163+static void
64164+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
64165+{
64166+ struct task_struct *task = current;
64167+ const struct cred *cred = current_cred();
64168+
64169+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64170+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64171+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64172+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
64173+
64174+ return;
64175+}
64176+
64177+static void
64178+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
64179+{
64180+ struct task_struct *task = current;
64181+ const struct cred *cred = current_cred();
64182+
64183+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64184+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64185+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64186+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
64187+
64188+ return;
64189+}
64190+
64191+__u32
64192+gr_search_file(const struct dentry * dentry, const __u32 mode,
64193+ const struct vfsmount * mnt)
64194+{
64195+ __u32 retval = mode;
64196+ struct acl_subject_label *curracl;
64197+ struct acl_object_label *currobj;
64198+
64199+ if (unlikely(!(gr_status & GR_READY)))
64200+ return (mode & ~GR_AUDITS);
64201+
64202+ curracl = current->acl;
64203+
64204+ currobj = chk_obj_label(dentry, mnt, curracl);
64205+ retval = currobj->mode & mode;
64206+
64207+ /* if we're opening a specified transfer file for writing
64208+ (e.g. /dev/initctl), then transfer our role to init
64209+ */
64210+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
64211+ current->role->roletype & GR_ROLE_PERSIST)) {
64212+ struct task_struct *task = init_pid_ns.child_reaper;
64213+
64214+ if (task->role != current->role) {
64215+ task->acl_sp_role = 0;
64216+ task->acl_role_id = current->acl_role_id;
64217+ task->role = current->role;
64218+ rcu_read_lock();
64219+ read_lock(&grsec_exec_file_lock);
64220+ gr_apply_subject_to_task(task);
64221+ read_unlock(&grsec_exec_file_lock);
64222+ rcu_read_unlock();
64223+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
64224+ }
64225+ }
64226+
64227+ if (unlikely
64228+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
64229+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
64230+ __u32 new_mode = mode;
64231+
64232+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
64233+
64234+ retval = new_mode;
64235+
64236+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
64237+ new_mode |= GR_INHERIT;
64238+
64239+ if (!(mode & GR_NOLEARN))
64240+ gr_log_learn(dentry, mnt, new_mode);
64241+ }
64242+
64243+ return retval;
64244+}
64245+
64246+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
64247+ const struct dentry *parent,
64248+ const struct vfsmount *mnt)
64249+{
64250+ struct name_entry *match;
64251+ struct acl_object_label *matchpo;
64252+ struct acl_subject_label *curracl;
64253+ char *path;
64254+
64255+ if (unlikely(!(gr_status & GR_READY)))
64256+ return NULL;
64257+
64258+ preempt_disable();
64259+ path = gr_to_filename_rbac(new_dentry, mnt);
64260+ match = lookup_name_entry_create(path);
64261+
64262+ curracl = current->acl;
64263+
64264+ if (match) {
64265+ read_lock(&gr_inode_lock);
64266+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
64267+ read_unlock(&gr_inode_lock);
64268+
64269+ if (matchpo) {
64270+ preempt_enable();
64271+ return matchpo;
64272+ }
64273+ }
64274+
64275+ // lookup parent
64276+
64277+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
64278+
64279+ preempt_enable();
64280+ return matchpo;
64281+}
64282+
64283+__u32
64284+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
64285+ const struct vfsmount * mnt, const __u32 mode)
64286+{
64287+ struct acl_object_label *matchpo;
64288+ __u32 retval;
64289+
64290+ if (unlikely(!(gr_status & GR_READY)))
64291+ return (mode & ~GR_AUDITS);
64292+
64293+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
64294+
64295+ retval = matchpo->mode & mode;
64296+
64297+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
64298+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
64299+ __u32 new_mode = mode;
64300+
64301+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
64302+
64303+ gr_log_learn(new_dentry, mnt, new_mode);
64304+ return new_mode;
64305+ }
64306+
64307+ return retval;
64308+}
64309+
64310+__u32
64311+gr_check_link(const struct dentry * new_dentry,
64312+ const struct dentry * parent_dentry,
64313+ const struct vfsmount * parent_mnt,
64314+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
64315+{
64316+ struct acl_object_label *obj;
64317+ __u32 oldmode, newmode;
64318+ __u32 needmode;
64319+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
64320+ GR_DELETE | GR_INHERIT;
64321+
64322+ if (unlikely(!(gr_status & GR_READY)))
64323+ return (GR_CREATE | GR_LINK);
64324+
64325+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
64326+ oldmode = obj->mode;
64327+
64328+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
64329+ newmode = obj->mode;
64330+
64331+ needmode = newmode & checkmodes;
64332+
64333+ // old name for hardlink must have at least the permissions of the new name
64334+ if ((oldmode & needmode) != needmode)
64335+ goto bad;
64336+
64337+ // if old name had restrictions/auditing, make sure the new name does as well
64338+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
64339+
64340+ // don't allow hardlinking of suid/sgid/fcapped files without permission
64341+ if (is_privileged_binary(old_dentry))
64342+ needmode |= GR_SETID;
64343+
64344+ if ((newmode & needmode) != needmode)
64345+ goto bad;
64346+
64347+ // enforce minimum permissions
64348+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
64349+ return newmode;
64350+bad:
64351+ needmode = oldmode;
64352+ if (is_privileged_binary(old_dentry))
64353+ needmode |= GR_SETID;
64354+
64355+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
64356+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
64357+ return (GR_CREATE | GR_LINK);
64358+ } else if (newmode & GR_SUPPRESS)
64359+ return GR_SUPPRESS;
64360+ else
64361+ return 0;
64362+}
64363+
64364+int
64365+gr_check_hidden_task(const struct task_struct *task)
64366+{
64367+ if (unlikely(!(gr_status & GR_READY)))
64368+ return 0;
64369+
64370+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
64371+ return 1;
64372+
64373+ return 0;
64374+}
64375+
64376+int
64377+gr_check_protected_task(const struct task_struct *task)
64378+{
64379+ if (unlikely(!(gr_status & GR_READY) || !task))
64380+ return 0;
64381+
64382+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
64383+ task->acl != current->acl)
64384+ return 1;
64385+
64386+ return 0;
64387+}
64388+
64389+int
64390+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64391+{
64392+ struct task_struct *p;
64393+ int ret = 0;
64394+
64395+ if (unlikely(!(gr_status & GR_READY) || !pid))
64396+ return ret;
64397+
64398+ read_lock(&tasklist_lock);
64399+ do_each_pid_task(pid, type, p) {
64400+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
64401+ p->acl != current->acl) {
64402+ ret = 1;
64403+ goto out;
64404+ }
64405+ } while_each_pid_task(pid, type, p);
64406+out:
64407+ read_unlock(&tasklist_lock);
64408+
64409+ return ret;
64410+}
64411+
64412+void
64413+gr_copy_label(struct task_struct *tsk)
64414+{
64415+ tsk->signal->used_accept = 0;
64416+ tsk->acl_sp_role = 0;
64417+ tsk->acl_role_id = current->acl_role_id;
64418+ tsk->acl = current->acl;
64419+ tsk->role = current->role;
64420+ tsk->signal->curr_ip = current->signal->curr_ip;
64421+ tsk->signal->saved_ip = current->signal->saved_ip;
64422+ if (current->exec_file)
64423+ get_file(current->exec_file);
64424+ tsk->exec_file = current->exec_file;
64425+ tsk->is_writable = current->is_writable;
64426+ if (unlikely(current->signal->used_accept)) {
64427+ current->signal->curr_ip = 0;
64428+ current->signal->saved_ip = 0;
64429+ }
64430+
64431+ return;
64432+}
64433+
64434+static void
64435+gr_set_proc_res(struct task_struct *task)
64436+{
64437+ struct acl_subject_label *proc;
64438+ unsigned short i;
64439+
64440+ proc = task->acl;
64441+
64442+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
64443+ return;
64444+
64445+ for (i = 0; i < RLIM_NLIMITS; i++) {
64446+ if (!(proc->resmask & (1U << i)))
64447+ continue;
64448+
64449+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
64450+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
64451+
64452+ if (i == RLIMIT_CPU)
64453+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
64454+ }
64455+
64456+ return;
64457+}
64458+
64459+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
64460+
64461+int
64462+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64463+{
64464+ unsigned int i;
64465+ __u16 num;
64466+ uid_t *uidlist;
64467+ uid_t curuid;
64468+ int realok = 0;
64469+ int effectiveok = 0;
64470+ int fsok = 0;
64471+ uid_t globalreal, globaleffective, globalfs;
64472+
64473+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
64474+ struct user_struct *user;
64475+
64476+ if (!uid_valid(real))
64477+ goto skipit;
64478+
64479+ /* find user based on global namespace */
64480+
64481+ globalreal = GR_GLOBAL_UID(real);
64482+
64483+ user = find_user(make_kuid(&init_user_ns, globalreal));
64484+ if (user == NULL)
64485+ goto skipit;
64486+
64487+ if (gr_process_kernel_setuid_ban(user)) {
64488+ /* for find_user */
64489+ free_uid(user);
64490+ return 1;
64491+ }
64492+
64493+ /* for find_user */
64494+ free_uid(user);
64495+
64496+skipit:
64497+#endif
64498+
64499+ if (unlikely(!(gr_status & GR_READY)))
64500+ return 0;
64501+
64502+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
64503+ gr_log_learn_uid_change(real, effective, fs);
64504+
64505+ num = current->acl->user_trans_num;
64506+ uidlist = current->acl->user_transitions;
64507+
64508+ if (uidlist == NULL)
64509+ return 0;
64510+
64511+ if (!uid_valid(real)) {
64512+ realok = 1;
64513+ globalreal = (uid_t)-1;
64514+ } else {
64515+ globalreal = GR_GLOBAL_UID(real);
64516+ }
64517+ if (!uid_valid(effective)) {
64518+ effectiveok = 1;
64519+ globaleffective = (uid_t)-1;
64520+ } else {
64521+ globaleffective = GR_GLOBAL_UID(effective);
64522+ }
64523+ if (!uid_valid(fs)) {
64524+ fsok = 1;
64525+ globalfs = (uid_t)-1;
64526+ } else {
64527+ globalfs = GR_GLOBAL_UID(fs);
64528+ }
64529+
64530+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
64531+ for (i = 0; i < num; i++) {
64532+ curuid = uidlist[i];
64533+ if (globalreal == curuid)
64534+ realok = 1;
64535+ if (globaleffective == curuid)
64536+ effectiveok = 1;
64537+ if (globalfs == curuid)
64538+ fsok = 1;
64539+ }
64540+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
64541+ for (i = 0; i < num; i++) {
64542+ curuid = uidlist[i];
64543+ if (globalreal == curuid)
64544+ break;
64545+ if (globaleffective == curuid)
64546+ break;
64547+ if (globalfs == curuid)
64548+ break;
64549+ }
64550+ /* not in deny list */
64551+ if (i == num) {
64552+ realok = 1;
64553+ effectiveok = 1;
64554+ fsok = 1;
64555+ }
64556+ }
64557+
64558+ if (realok && effectiveok && fsok)
64559+ return 0;
64560+ else {
64561+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
64562+ return 1;
64563+ }
64564+}
64565+
64566+int
64567+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64568+{
64569+ unsigned int i;
64570+ __u16 num;
64571+ gid_t *gidlist;
64572+ gid_t curgid;
64573+ int realok = 0;
64574+ int effectiveok = 0;
64575+ int fsok = 0;
64576+ gid_t globalreal, globaleffective, globalfs;
64577+
64578+ if (unlikely(!(gr_status & GR_READY)))
64579+ return 0;
64580+
64581+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
64582+ gr_log_learn_gid_change(real, effective, fs);
64583+
64584+ num = current->acl->group_trans_num;
64585+ gidlist = current->acl->group_transitions;
64586+
64587+ if (gidlist == NULL)
64588+ return 0;
64589+
64590+ if (!gid_valid(real)) {
64591+ realok = 1;
64592+ globalreal = (gid_t)-1;
64593+ } else {
64594+ globalreal = GR_GLOBAL_GID(real);
64595+ }
64596+ if (!gid_valid(effective)) {
64597+ effectiveok = 1;
64598+ globaleffective = (gid_t)-1;
64599+ } else {
64600+ globaleffective = GR_GLOBAL_GID(effective);
64601+ }
64602+ if (!gid_valid(fs)) {
64603+ fsok = 1;
64604+ globalfs = (gid_t)-1;
64605+ } else {
64606+ globalfs = GR_GLOBAL_GID(fs);
64607+ }
64608+
64609+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
64610+ for (i = 0; i < num; i++) {
64611+ curgid = gidlist[i];
64612+ if (globalreal == curgid)
64613+ realok = 1;
64614+ if (globaleffective == curgid)
64615+ effectiveok = 1;
64616+ if (globalfs == curgid)
64617+ fsok = 1;
64618+ }
64619+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
64620+ for (i = 0; i < num; i++) {
64621+ curgid = gidlist[i];
64622+ if (globalreal == curgid)
64623+ break;
64624+ if (globaleffective == curgid)
64625+ break;
64626+ if (globalfs == curgid)
64627+ break;
64628+ }
64629+ /* not in deny list */
64630+ if (i == num) {
64631+ realok = 1;
64632+ effectiveok = 1;
64633+ fsok = 1;
64634+ }
64635+ }
64636+
64637+ if (realok && effectiveok && fsok)
64638+ return 0;
64639+ else {
64640+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
64641+ return 1;
64642+ }
64643+}
64644+
64645+extern int gr_acl_is_capable(const int cap);
64646+
64647+void
64648+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
64649+{
64650+ struct acl_role_label *role = task->role;
64651+ struct acl_subject_label *subj = NULL;
64652+ struct acl_object_label *obj;
64653+ struct file *filp;
64654+ uid_t uid;
64655+ gid_t gid;
64656+
64657+ if (unlikely(!(gr_status & GR_READY)))
64658+ return;
64659+
64660+ uid = GR_GLOBAL_UID(kuid);
64661+ gid = GR_GLOBAL_GID(kgid);
64662+
64663+ filp = task->exec_file;
64664+
64665+ /* kernel process, we'll give them the kernel role */
64666+ if (unlikely(!filp)) {
64667+ task->role = kernel_role;
64668+ task->acl = kernel_role->root_label;
64669+ return;
64670+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
64671+ role = lookup_acl_role_label(task, uid, gid);
64672+
64673+ /* don't change the role if we're not a privileged process */
64674+ if (role && task->role != role &&
64675+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
64676+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
64677+ return;
64678+
64679+ /* perform subject lookup in possibly new role
64680+ we can use this result below in the case where role == task->role
64681+ */
64682+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
64683+
64684+ /* if we changed uid/gid, but result in the same role
64685+ and are using inheritance, don't lose the inherited subject
64686+ if current subject is other than what normal lookup
64687+ would result in, we arrived via inheritance, don't
64688+ lose subject
64689+ */
64690+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
64691+ (subj == task->acl)))
64692+ task->acl = subj;
64693+
64694+ task->role = role;
64695+
64696+ task->is_writable = 0;
64697+
64698+ /* ignore additional mmap checks for processes that are writable
64699+ by the default ACL */
64700+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
64701+ if (unlikely(obj->mode & GR_WRITE))
64702+ task->is_writable = 1;
64703+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
64704+ if (unlikely(obj->mode & GR_WRITE))
64705+ task->is_writable = 1;
64706+
64707+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64708+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64709+#endif
64710+
64711+ gr_set_proc_res(task);
64712+
64713+ return;
64714+}
64715+
64716+int
64717+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64718+ const int unsafe_flags)
64719+{
64720+ struct task_struct *task = current;
64721+ struct acl_subject_label *newacl;
64722+ struct acl_object_label *obj;
64723+ __u32 retmode;
64724+
64725+ if (unlikely(!(gr_status & GR_READY)))
64726+ return 0;
64727+
64728+ newacl = chk_subj_label(dentry, mnt, task->role);
64729+
64730+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
64731+ did an exec
64732+ */
64733+ rcu_read_lock();
64734+ read_lock(&tasklist_lock);
64735+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
64736+ (task->parent->acl->mode & GR_POVERRIDE))) {
64737+ read_unlock(&tasklist_lock);
64738+ rcu_read_unlock();
64739+ goto skip_check;
64740+ }
64741+ read_unlock(&tasklist_lock);
64742+ rcu_read_unlock();
64743+
64744+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
64745+ !(task->role->roletype & GR_ROLE_GOD) &&
64746+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
64747+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
64748+ if (unsafe_flags & LSM_UNSAFE_SHARE)
64749+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
64750+ else
64751+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
64752+ return -EACCES;
64753+ }
64754+
64755+skip_check:
64756+
64757+ obj = chk_obj_label(dentry, mnt, task->acl);
64758+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
64759+
64760+ if (!(task->acl->mode & GR_INHERITLEARN) &&
64761+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
64762+ if (obj->nested)
64763+ task->acl = obj->nested;
64764+ else
64765+ task->acl = newacl;
64766+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
64767+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
64768+
64769+ task->is_writable = 0;
64770+
64771+ /* ignore additional mmap checks for processes that are writable
64772+ by the default ACL */
64773+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
64774+ if (unlikely(obj->mode & GR_WRITE))
64775+ task->is_writable = 1;
64776+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
64777+ if (unlikely(obj->mode & GR_WRITE))
64778+ task->is_writable = 1;
64779+
64780+ gr_set_proc_res(task);
64781+
64782+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64783+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64784+#endif
64785+ return 0;
64786+}
64787+
64788+/* always called with valid inodev ptr */
64789+static void
64790+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
64791+{
64792+ struct acl_object_label *matchpo;
64793+ struct acl_subject_label *matchps;
64794+ struct acl_subject_label *subj;
64795+ struct acl_role_label *role;
64796+ unsigned int x;
64797+
64798+ FOR_EACH_ROLE_START(role)
64799+ FOR_EACH_SUBJECT_START(role, subj, x)
64800+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
64801+ matchpo->mode |= GR_DELETED;
64802+ FOR_EACH_SUBJECT_END(subj,x)
64803+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
64804+ /* nested subjects aren't in the role's subj_hash table */
64805+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
64806+ matchpo->mode |= GR_DELETED;
64807+ FOR_EACH_NESTED_SUBJECT_END(subj)
64808+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
64809+ matchps->mode |= GR_DELETED;
64810+ FOR_EACH_ROLE_END(role)
64811+
64812+ inodev->nentry->deleted = 1;
64813+
64814+ return;
64815+}
64816+
64817+void
64818+gr_handle_delete(const ino_t ino, const dev_t dev)
64819+{
64820+ struct inodev_entry *inodev;
64821+
64822+ if (unlikely(!(gr_status & GR_READY)))
64823+ return;
64824+
64825+ write_lock(&gr_inode_lock);
64826+ inodev = lookup_inodev_entry(ino, dev);
64827+ if (inodev != NULL)
64828+ do_handle_delete(inodev, ino, dev);
64829+ write_unlock(&gr_inode_lock);
64830+
64831+ return;
64832+}
64833+
64834+static void
64835+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
64836+ const ino_t newinode, const dev_t newdevice,
64837+ struct acl_subject_label *subj)
64838+{
64839+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
64840+ struct acl_object_label *match;
64841+
64842+ match = subj->obj_hash[index];
64843+
64844+ while (match && (match->inode != oldinode ||
64845+ match->device != olddevice ||
64846+ !(match->mode & GR_DELETED)))
64847+ match = match->next;
64848+
64849+ if (match && (match->inode == oldinode)
64850+ && (match->device == olddevice)
64851+ && (match->mode & GR_DELETED)) {
64852+ if (match->prev == NULL) {
64853+ subj->obj_hash[index] = match->next;
64854+ if (match->next != NULL)
64855+ match->next->prev = NULL;
64856+ } else {
64857+ match->prev->next = match->next;
64858+ if (match->next != NULL)
64859+ match->next->prev = match->prev;
64860+ }
64861+ match->prev = NULL;
64862+ match->next = NULL;
64863+ match->inode = newinode;
64864+ match->device = newdevice;
64865+ match->mode &= ~GR_DELETED;
64866+
64867+ insert_acl_obj_label(match, subj);
64868+ }
64869+
64870+ return;
64871+}
64872+
64873+static void
64874+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
64875+ const ino_t newinode, const dev_t newdevice,
64876+ struct acl_role_label *role)
64877+{
64878+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
64879+ struct acl_subject_label *match;
64880+
64881+ match = role->subj_hash[index];
64882+
64883+ while (match && (match->inode != oldinode ||
64884+ match->device != olddevice ||
64885+ !(match->mode & GR_DELETED)))
64886+ match = match->next;
64887+
64888+ if (match && (match->inode == oldinode)
64889+ && (match->device == olddevice)
64890+ && (match->mode & GR_DELETED)) {
64891+ if (match->prev == NULL) {
64892+ role->subj_hash[index] = match->next;
64893+ if (match->next != NULL)
64894+ match->next->prev = NULL;
64895+ } else {
64896+ match->prev->next = match->next;
64897+ if (match->next != NULL)
64898+ match->next->prev = match->prev;
64899+ }
64900+ match->prev = NULL;
64901+ match->next = NULL;
64902+ match->inode = newinode;
64903+ match->device = newdevice;
64904+ match->mode &= ~GR_DELETED;
64905+
64906+ insert_acl_subj_label(match, role);
64907+ }
64908+
64909+ return;
64910+}
64911+
64912+static void
64913+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
64914+ const ino_t newinode, const dev_t newdevice)
64915+{
64916+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
64917+ struct inodev_entry *match;
64918+
64919+ match = inodev_set.i_hash[index];
64920+
64921+ while (match && (match->nentry->inode != oldinode ||
64922+ match->nentry->device != olddevice || !match->nentry->deleted))
64923+ match = match->next;
64924+
64925+ if (match && (match->nentry->inode == oldinode)
64926+ && (match->nentry->device == olddevice) &&
64927+ match->nentry->deleted) {
64928+ if (match->prev == NULL) {
64929+ inodev_set.i_hash[index] = match->next;
64930+ if (match->next != NULL)
64931+ match->next->prev = NULL;
64932+ } else {
64933+ match->prev->next = match->next;
64934+ if (match->next != NULL)
64935+ match->next->prev = match->prev;
64936+ }
64937+ match->prev = NULL;
64938+ match->next = NULL;
64939+ match->nentry->inode = newinode;
64940+ match->nentry->device = newdevice;
64941+ match->nentry->deleted = 0;
64942+
64943+ insert_inodev_entry(match);
64944+ }
64945+
64946+ return;
64947+}
64948+
64949+static void
64950+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
64951+{
64952+ struct acl_subject_label *subj;
64953+ struct acl_role_label *role;
64954+ unsigned int x;
64955+
64956+ FOR_EACH_ROLE_START(role)
64957+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
64958+
64959+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
64960+ if ((subj->inode == ino) && (subj->device == dev)) {
64961+ subj->inode = ino;
64962+ subj->device = dev;
64963+ }
64964+ /* nested subjects aren't in the role's subj_hash table */
64965+ update_acl_obj_label(matchn->inode, matchn->device,
64966+ ino, dev, subj);
64967+ FOR_EACH_NESTED_SUBJECT_END(subj)
64968+ FOR_EACH_SUBJECT_START(role, subj, x)
64969+ update_acl_obj_label(matchn->inode, matchn->device,
64970+ ino, dev, subj);
64971+ FOR_EACH_SUBJECT_END(subj,x)
64972+ FOR_EACH_ROLE_END(role)
64973+
64974+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
64975+
64976+ return;
64977+}
64978+
64979+static void
64980+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
64981+ const struct vfsmount *mnt)
64982+{
64983+ ino_t ino = dentry->d_inode->i_ino;
64984+ dev_t dev = __get_dev(dentry);
64985+
64986+ __do_handle_create(matchn, ino, dev);
64987+
64988+ return;
64989+}
64990+
64991+void
64992+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
64993+{
64994+ struct name_entry *matchn;
64995+
64996+ if (unlikely(!(gr_status & GR_READY)))
64997+ return;
64998+
64999+ preempt_disable();
65000+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
65001+
65002+ if (unlikely((unsigned long)matchn)) {
65003+ write_lock(&gr_inode_lock);
65004+ do_handle_create(matchn, dentry, mnt);
65005+ write_unlock(&gr_inode_lock);
65006+ }
65007+ preempt_enable();
65008+
65009+ return;
65010+}
65011+
65012+void
65013+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
65014+{
65015+ struct name_entry *matchn;
65016+
65017+ if (unlikely(!(gr_status & GR_READY)))
65018+ return;
65019+
65020+ preempt_disable();
65021+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
65022+
65023+ if (unlikely((unsigned long)matchn)) {
65024+ write_lock(&gr_inode_lock);
65025+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
65026+ write_unlock(&gr_inode_lock);
65027+ }
65028+ preempt_enable();
65029+
65030+ return;
65031+}
65032+
65033+void
65034+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
65035+ struct dentry *old_dentry,
65036+ struct dentry *new_dentry,
65037+ struct vfsmount *mnt, const __u8 replace)
65038+{
65039+ struct name_entry *matchn;
65040+ struct inodev_entry *inodev;
65041+ struct inode *inode = new_dentry->d_inode;
65042+ ino_t old_ino = old_dentry->d_inode->i_ino;
65043+ dev_t old_dev = __get_dev(old_dentry);
65044+
65045+ /* vfs_rename swaps the name and parent link for old_dentry and
65046+ new_dentry
65047+ at this point, old_dentry has the new name, parent link, and inode
65048+ for the renamed file
65049+ if a file is being replaced by a rename, new_dentry has the inode
65050+ and name for the replaced file
65051+ */
65052+
65053+ if (unlikely(!(gr_status & GR_READY)))
65054+ return;
65055+
65056+ preempt_disable();
65057+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
65058+
65059+ /* we wouldn't have to check d_inode if it weren't for
65060+ NFS silly-renaming
65061+ */
65062+
65063+ write_lock(&gr_inode_lock);
65064+ if (unlikely(replace && inode)) {
65065+ ino_t new_ino = inode->i_ino;
65066+ dev_t new_dev = __get_dev(new_dentry);
65067+
65068+ inodev = lookup_inodev_entry(new_ino, new_dev);
65069+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
65070+ do_handle_delete(inodev, new_ino, new_dev);
65071+ }
65072+
65073+ inodev = lookup_inodev_entry(old_ino, old_dev);
65074+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
65075+ do_handle_delete(inodev, old_ino, old_dev);
65076+
65077+ if (unlikely((unsigned long)matchn))
65078+ do_handle_create(matchn, old_dentry, mnt);
65079+
65080+ write_unlock(&gr_inode_lock);
65081+ preempt_enable();
65082+
65083+ return;
65084+}
65085+
65086+static int
65087+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
65088+ unsigned char **sum)
65089+{
65090+ struct acl_role_label *r;
65091+ struct role_allowed_ip *ipp;
65092+ struct role_transition *trans;
65093+ unsigned int i;
65094+ int found = 0;
65095+ u32 curr_ip = current->signal->curr_ip;
65096+
65097+ current->signal->saved_ip = curr_ip;
65098+
65099+ /* check transition table */
65100+
65101+ for (trans = current->role->transitions; trans; trans = trans->next) {
65102+ if (!strcmp(rolename, trans->rolename)) {
65103+ found = 1;
65104+ break;
65105+ }
65106+ }
65107+
65108+ if (!found)
65109+ return 0;
65110+
65111+ /* handle special roles that do not require authentication
65112+ and check ip */
65113+
65114+ FOR_EACH_ROLE_START(r)
65115+ if (!strcmp(rolename, r->rolename) &&
65116+ (r->roletype & GR_ROLE_SPECIAL)) {
65117+ found = 0;
65118+ if (r->allowed_ips != NULL) {
65119+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
65120+ if ((ntohl(curr_ip) & ipp->netmask) ==
65121+ (ntohl(ipp->addr) & ipp->netmask))
65122+ found = 1;
65123+ }
65124+ } else
65125+ found = 2;
65126+ if (!found)
65127+ return 0;
65128+
65129+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
65130+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
65131+ *salt = NULL;
65132+ *sum = NULL;
65133+ return 1;
65134+ }
65135+ }
65136+ FOR_EACH_ROLE_END(r)
65137+
65138+ for (i = 0; i < num_sprole_pws; i++) {
65139+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
65140+ *salt = acl_special_roles[i]->salt;
65141+ *sum = acl_special_roles[i]->sum;
65142+ return 1;
65143+ }
65144+ }
65145+
65146+ return 0;
65147+}
65148+
65149+static void
65150+assign_special_role(char *rolename)
65151+{
65152+ struct acl_object_label *obj;
65153+ struct acl_role_label *r;
65154+ struct acl_role_label *assigned = NULL;
65155+ struct task_struct *tsk;
65156+ struct file *filp;
65157+
65158+ FOR_EACH_ROLE_START(r)
65159+ if (!strcmp(rolename, r->rolename) &&
65160+ (r->roletype & GR_ROLE_SPECIAL)) {
65161+ assigned = r;
65162+ break;
65163+ }
65164+ FOR_EACH_ROLE_END(r)
65165+
65166+ if (!assigned)
65167+ return;
65168+
65169+ read_lock(&tasklist_lock);
65170+ read_lock(&grsec_exec_file_lock);
65171+
65172+ tsk = current->real_parent;
65173+ if (tsk == NULL)
65174+ goto out_unlock;
65175+
65176+ filp = tsk->exec_file;
65177+ if (filp == NULL)
65178+ goto out_unlock;
65179+
65180+ tsk->is_writable = 0;
65181+
65182+ tsk->acl_sp_role = 1;
65183+ tsk->acl_role_id = ++acl_sp_role_value;
65184+ tsk->role = assigned;
65185+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
65186+
65187+ /* ignore additional mmap checks for processes that are writable
65188+ by the default ACL */
65189+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
65190+ if (unlikely(obj->mode & GR_WRITE))
65191+ tsk->is_writable = 1;
65192+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
65193+ if (unlikely(obj->mode & GR_WRITE))
65194+ tsk->is_writable = 1;
65195+
65196+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65197+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
65198+#endif
65199+
65200+out_unlock:
65201+ read_unlock(&grsec_exec_file_lock);
65202+ read_unlock(&tasklist_lock);
65203+ return;
65204+}
65205+
65206+int gr_check_secure_terminal(struct task_struct *task)
65207+{
65208+ struct task_struct *p, *p2, *p3;
65209+ struct files_struct *files;
65210+ struct fdtable *fdt;
65211+ struct file *our_file = NULL, *file;
65212+ int i;
65213+
65214+ if (task->signal->tty == NULL)
65215+ return 1;
65216+
65217+ files = get_files_struct(task);
65218+ if (files != NULL) {
65219+ rcu_read_lock();
65220+ fdt = files_fdtable(files);
65221+ for (i=0; i < fdt->max_fds; i++) {
65222+ file = fcheck_files(files, i);
65223+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
65224+ get_file(file);
65225+ our_file = file;
65226+ }
65227+ }
65228+ rcu_read_unlock();
65229+ put_files_struct(files);
65230+ }
65231+
65232+ if (our_file == NULL)
65233+ return 1;
65234+
65235+ read_lock(&tasklist_lock);
65236+ do_each_thread(p2, p) {
65237+ files = get_files_struct(p);
65238+ if (files == NULL ||
65239+ (p->signal && p->signal->tty == task->signal->tty)) {
65240+ if (files != NULL)
65241+ put_files_struct(files);
65242+ continue;
65243+ }
65244+ rcu_read_lock();
65245+ fdt = files_fdtable(files);
65246+ for (i=0; i < fdt->max_fds; i++) {
65247+ file = fcheck_files(files, i);
65248+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
65249+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
65250+ p3 = task;
65251+ while (task_pid_nr(p3) > 0) {
65252+ if (p3 == p)
65253+ break;
65254+ p3 = p3->real_parent;
65255+ }
65256+ if (p3 == p)
65257+ break;
65258+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
65259+ gr_handle_alertkill(p);
65260+ rcu_read_unlock();
65261+ put_files_struct(files);
65262+ read_unlock(&tasklist_lock);
65263+ fput(our_file);
65264+ return 0;
65265+ }
65266+ }
65267+ rcu_read_unlock();
65268+ put_files_struct(files);
65269+ } while_each_thread(p2, p);
65270+ read_unlock(&tasklist_lock);
65271+
65272+ fput(our_file);
65273+ return 1;
65274+}
65275+
65276+static int gr_rbac_disable(void *unused)
65277+{
65278+ pax_open_kernel();
65279+ gr_status &= ~GR_READY;
65280+ pax_close_kernel();
65281+
65282+ return 0;
65283+}
65284+
65285+ssize_t
65286+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
65287+{
65288+ struct gr_arg_wrapper uwrap;
65289+ unsigned char *sprole_salt = NULL;
65290+ unsigned char *sprole_sum = NULL;
65291+ int error = 0;
65292+ int error2 = 0;
65293+ size_t req_count = 0;
65294+
65295+ mutex_lock(&gr_dev_mutex);
65296+
65297+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
65298+ error = -EPERM;
65299+ goto out;
65300+ }
65301+
65302+#ifdef CONFIG_COMPAT
65303+ pax_open_kernel();
65304+ if (is_compat_task()) {
65305+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
65306+ copy_gr_arg = &copy_gr_arg_compat;
65307+ copy_acl_object_label = &copy_acl_object_label_compat;
65308+ copy_acl_subject_label = &copy_acl_subject_label_compat;
65309+ copy_acl_role_label = &copy_acl_role_label_compat;
65310+ copy_acl_ip_label = &copy_acl_ip_label_compat;
65311+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
65312+ copy_role_transition = &copy_role_transition_compat;
65313+ copy_sprole_pw = &copy_sprole_pw_compat;
65314+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
65315+ copy_pointer_from_array = &copy_pointer_from_array_compat;
65316+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
65317+ } else {
65318+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
65319+ copy_gr_arg = &copy_gr_arg_normal;
65320+ copy_acl_object_label = &copy_acl_object_label_normal;
65321+ copy_acl_subject_label = &copy_acl_subject_label_normal;
65322+ copy_acl_role_label = &copy_acl_role_label_normal;
65323+ copy_acl_ip_label = &copy_acl_ip_label_normal;
65324+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
65325+ copy_role_transition = &copy_role_transition_normal;
65326+ copy_sprole_pw = &copy_sprole_pw_normal;
65327+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
65328+ copy_pointer_from_array = &copy_pointer_from_array_normal;
65329+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
65330+ }
65331+ pax_close_kernel();
65332+#endif
65333+
65334+ req_count = get_gr_arg_wrapper_size();
65335+
65336+ if (count != req_count) {
65337+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
65338+ error = -EINVAL;
65339+ goto out;
65340+ }
65341+
65342+
65343+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
65344+ gr_auth_expires = 0;
65345+ gr_auth_attempts = 0;
65346+ }
65347+
65348+ error = copy_gr_arg_wrapper(buf, &uwrap);
65349+ if (error)
65350+ goto out;
65351+
65352+ error = copy_gr_arg(uwrap.arg, gr_usermode);
65353+ if (error)
65354+ goto out;
65355+
65356+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
65357+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
65358+ time_after(gr_auth_expires, get_seconds())) {
65359+ error = -EBUSY;
65360+ goto out;
65361+ }
65362+
65363+ /* if non-root trying to do anything other than use a special role,
65364+ do not attempt authentication, do not count towards authentication
65365+ locking
65366+ */
65367+
65368+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
65369+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
65370+ gr_is_global_nonroot(current_uid())) {
65371+ error = -EPERM;
65372+ goto out;
65373+ }
65374+
65375+ /* ensure pw and special role name are null terminated */
65376+
65377+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
65378+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
65379+
65380+ /* Okay.
65381+ * We have our enough of the argument structure..(we have yet
65382+ * to copy_from_user the tables themselves) . Copy the tables
65383+ * only if we need them, i.e. for loading operations. */
65384+
65385+ switch (gr_usermode->mode) {
65386+ case GR_STATUS:
65387+ if (gr_status & GR_READY) {
65388+ error = 1;
65389+ if (!gr_check_secure_terminal(current))
65390+ error = 3;
65391+ } else
65392+ error = 2;
65393+ goto out;
65394+ case GR_SHUTDOWN:
65395+ if ((gr_status & GR_READY)
65396+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65397+ stop_machine(gr_rbac_disable, NULL, NULL);
65398+ free_variables();
65399+ memset(gr_usermode, 0, sizeof (struct gr_arg));
65400+ memset(gr_system_salt, 0, GR_SALT_LEN);
65401+ memset(gr_system_sum, 0, GR_SHA_LEN);
65402+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
65403+ } else if (gr_status & GR_READY) {
65404+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
65405+ error = -EPERM;
65406+ } else {
65407+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
65408+ error = -EAGAIN;
65409+ }
65410+ break;
65411+ case GR_ENABLE:
65412+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
65413+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
65414+ else {
65415+ if (gr_status & GR_READY)
65416+ error = -EAGAIN;
65417+ else
65418+ error = error2;
65419+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
65420+ }
65421+ break;
65422+ case GR_RELOAD:
65423+ if (!(gr_status & GR_READY)) {
65424+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
65425+ error = -EAGAIN;
65426+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65427+ stop_machine(gr_rbac_disable, NULL, NULL);
65428+ free_variables();
65429+ error2 = gracl_init(gr_usermode);
65430+ if (!error2)
65431+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
65432+ else {
65433+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
65434+ error = error2;
65435+ }
65436+ } else {
65437+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
65438+ error = -EPERM;
65439+ }
65440+ break;
65441+ case GR_SEGVMOD:
65442+ if (unlikely(!(gr_status & GR_READY))) {
65443+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
65444+ error = -EAGAIN;
65445+ break;
65446+ }
65447+
65448+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65449+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
65450+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
65451+ struct acl_subject_label *segvacl;
65452+ segvacl =
65453+ lookup_acl_subj_label(gr_usermode->segv_inode,
65454+ gr_usermode->segv_device,
65455+ current->role);
65456+ if (segvacl) {
65457+ segvacl->crashes = 0;
65458+ segvacl->expires = 0;
65459+ }
65460+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
65461+ gr_remove_uid(gr_usermode->segv_uid);
65462+ }
65463+ } else {
65464+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
65465+ error = -EPERM;
65466+ }
65467+ break;
65468+ case GR_SPROLE:
65469+ case GR_SPROLEPAM:
65470+ if (unlikely(!(gr_status & GR_READY))) {
65471+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
65472+ error = -EAGAIN;
65473+ break;
65474+ }
65475+
65476+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
65477+ current->role->expires = 0;
65478+ current->role->auth_attempts = 0;
65479+ }
65480+
65481+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
65482+ time_after(current->role->expires, get_seconds())) {
65483+ error = -EBUSY;
65484+ goto out;
65485+ }
65486+
65487+ if (lookup_special_role_auth
65488+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
65489+ && ((!sprole_salt && !sprole_sum)
65490+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
65491+ char *p = "";
65492+ assign_special_role(gr_usermode->sp_role);
65493+ read_lock(&tasklist_lock);
65494+ if (current->real_parent)
65495+ p = current->real_parent->role->rolename;
65496+ read_unlock(&tasklist_lock);
65497+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
65498+ p, acl_sp_role_value);
65499+ } else {
65500+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
65501+ error = -EPERM;
65502+ if(!(current->role->auth_attempts++))
65503+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
65504+
65505+ goto out;
65506+ }
65507+ break;
65508+ case GR_UNSPROLE:
65509+ if (unlikely(!(gr_status & GR_READY))) {
65510+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
65511+ error = -EAGAIN;
65512+ break;
65513+ }
65514+
65515+ if (current->role->roletype & GR_ROLE_SPECIAL) {
65516+ char *p = "";
65517+ int i = 0;
65518+
65519+ read_lock(&tasklist_lock);
65520+ if (current->real_parent) {
65521+ p = current->real_parent->role->rolename;
65522+ i = current->real_parent->acl_role_id;
65523+ }
65524+ read_unlock(&tasklist_lock);
65525+
65526+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
65527+ gr_set_acls(1);
65528+ } else {
65529+ error = -EPERM;
65530+ goto out;
65531+ }
65532+ break;
65533+ default:
65534+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
65535+ error = -EINVAL;
65536+ break;
65537+ }
65538+
65539+ if (error != -EPERM)
65540+ goto out;
65541+
65542+ if(!(gr_auth_attempts++))
65543+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
65544+
65545+ out:
65546+ mutex_unlock(&gr_dev_mutex);
65547+
65548+ if (!error)
65549+ error = req_count;
65550+
65551+ return error;
65552+}
65553+
65554+/* must be called with
65555+ rcu_read_lock();
65556+ read_lock(&tasklist_lock);
65557+ read_lock(&grsec_exec_file_lock);
65558+*/
65559+int gr_apply_subject_to_task(struct task_struct *task)
65560+{
65561+ struct acl_object_label *obj;
65562+ char *tmpname;
65563+ struct acl_subject_label *tmpsubj;
65564+ struct file *filp;
65565+ struct name_entry *nmatch;
65566+
65567+ filp = task->exec_file;
65568+ if (filp == NULL)
65569+ return 0;
65570+
65571+ /* the following is to apply the correct subject
65572+ on binaries running when the RBAC system
65573+ is enabled, when the binaries have been
65574+ replaced or deleted since their execution
65575+ -----
65576+ when the RBAC system starts, the inode/dev
65577+ from exec_file will be one the RBAC system
65578+ is unaware of. It only knows the inode/dev
65579+ of the present file on disk, or the absence
65580+ of it.
65581+ */
65582+ preempt_disable();
65583+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
65584+
65585+ nmatch = lookup_name_entry(tmpname);
65586+ preempt_enable();
65587+ tmpsubj = NULL;
65588+ if (nmatch) {
65589+ if (nmatch->deleted)
65590+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
65591+ else
65592+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
65593+ if (tmpsubj != NULL)
65594+ task->acl = tmpsubj;
65595+ }
65596+ if (tmpsubj == NULL)
65597+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
65598+ task->role);
65599+ if (task->acl) {
65600+ task->is_writable = 0;
65601+ /* ignore additional mmap checks for processes that are writable
65602+ by the default ACL */
65603+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
65604+ if (unlikely(obj->mode & GR_WRITE))
65605+ task->is_writable = 1;
65606+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
65607+ if (unlikely(obj->mode & GR_WRITE))
65608+ task->is_writable = 1;
65609+
65610+ gr_set_proc_res(task);
65611+
65612+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65613+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
65614+#endif
65615+ } else {
65616+ return 1;
65617+ }
65618+
65619+ return 0;
65620+}
65621+
65622+int
65623+gr_set_acls(const int type)
65624+{
65625+ struct task_struct *task, *task2;
65626+ struct acl_role_label *role = current->role;
65627+ __u16 acl_role_id = current->acl_role_id;
65628+ const struct cred *cred;
65629+ int ret;
65630+
65631+ rcu_read_lock();
65632+ read_lock(&tasklist_lock);
65633+ read_lock(&grsec_exec_file_lock);
65634+ do_each_thread(task2, task) {
65635+ /* check to see if we're called from the exit handler,
65636+ if so, only replace ACLs that have inherited the admin
65637+ ACL */
65638+
65639+ if (type && (task->role != role ||
65640+ task->acl_role_id != acl_role_id))
65641+ continue;
65642+
65643+ task->acl_role_id = 0;
65644+ task->acl_sp_role = 0;
65645+
65646+ if (task->exec_file) {
65647+ cred = __task_cred(task);
65648+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
65649+ ret = gr_apply_subject_to_task(task);
65650+ if (ret) {
65651+ read_unlock(&grsec_exec_file_lock);
65652+ read_unlock(&tasklist_lock);
65653+ rcu_read_unlock();
65654+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
65655+ return ret;
65656+ }
65657+ } else {
65658+ // it's a kernel process
65659+ task->role = kernel_role;
65660+ task->acl = kernel_role->root_label;
65661+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
65662+ task->acl->mode &= ~GR_PROCFIND;
65663+#endif
65664+ }
65665+ } while_each_thread(task2, task);
65666+ read_unlock(&grsec_exec_file_lock);
65667+ read_unlock(&tasklist_lock);
65668+ rcu_read_unlock();
65669+
65670+ return 0;
65671+}
65672+
65673+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
65674+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
65675+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
65676+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
65677+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
65678+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
65679+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
65680+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
65681+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
65682+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
65683+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
65684+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
65685+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
65686+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
65687+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
65688+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
65689+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
65690+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
65691+};
65692+
65693+void
65694+gr_learn_resource(const struct task_struct *task,
65695+ const int res, const unsigned long wanted, const int gt)
65696+{
65697+ struct acl_subject_label *acl;
65698+ const struct cred *cred;
65699+
65700+ if (unlikely((gr_status & GR_READY) &&
65701+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
65702+ goto skip_reslog;
65703+
65704+ gr_log_resource(task, res, wanted, gt);
65705+skip_reslog:
65706+
65707+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
65708+ return;
65709+
65710+ acl = task->acl;
65711+
65712+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
65713+ !(acl->resmask & (1U << (unsigned short) res))))
65714+ return;
65715+
65716+ if (wanted >= acl->res[res].rlim_cur) {
65717+ unsigned long res_add;
65718+
65719+ res_add = wanted + res_learn_bumps[res];
65720+
65721+ acl->res[res].rlim_cur = res_add;
65722+
65723+ if (wanted > acl->res[res].rlim_max)
65724+ acl->res[res].rlim_max = res_add;
65725+
65726+ /* only log the subject filename, since resource logging is supported for
65727+ single-subject learning only */
65728+ rcu_read_lock();
65729+ cred = __task_cred(task);
65730+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
65731+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
65732+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
65733+ "", (unsigned long) res, &task->signal->saved_ip);
65734+ rcu_read_unlock();
65735+ }
65736+
65737+ return;
65738+}
65739+EXPORT_SYMBOL(gr_learn_resource);
65740+#endif
65741+
65742+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
65743+void
65744+pax_set_initial_flags(struct linux_binprm *bprm)
65745+{
65746+ struct task_struct *task = current;
65747+ struct acl_subject_label *proc;
65748+ unsigned long flags;
65749+
65750+ if (unlikely(!(gr_status & GR_READY)))
65751+ return;
65752+
65753+ flags = pax_get_flags(task);
65754+
65755+ proc = task->acl;
65756+
65757+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
65758+ flags &= ~MF_PAX_PAGEEXEC;
65759+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
65760+ flags &= ~MF_PAX_SEGMEXEC;
65761+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
65762+ flags &= ~MF_PAX_RANDMMAP;
65763+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
65764+ flags &= ~MF_PAX_EMUTRAMP;
65765+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
65766+ flags &= ~MF_PAX_MPROTECT;
65767+
65768+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
65769+ flags |= MF_PAX_PAGEEXEC;
65770+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
65771+ flags |= MF_PAX_SEGMEXEC;
65772+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
65773+ flags |= MF_PAX_RANDMMAP;
65774+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
65775+ flags |= MF_PAX_EMUTRAMP;
65776+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
65777+ flags |= MF_PAX_MPROTECT;
65778+
65779+ pax_set_flags(task, flags);
65780+
65781+ return;
65782+}
65783+#endif
65784+
65785+int
65786+gr_handle_proc_ptrace(struct task_struct *task)
65787+{
65788+ struct file *filp;
65789+ struct task_struct *tmp = task;
65790+ struct task_struct *curtemp = current;
65791+ __u32 retmode;
65792+
65793+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65794+ if (unlikely(!(gr_status & GR_READY)))
65795+ return 0;
65796+#endif
65797+
65798+ read_lock(&tasklist_lock);
65799+ read_lock(&grsec_exec_file_lock);
65800+ filp = task->exec_file;
65801+
65802+ while (task_pid_nr(tmp) > 0) {
65803+ if (tmp == curtemp)
65804+ break;
65805+ tmp = tmp->real_parent;
65806+ }
65807+
65808+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
65809+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
65810+ read_unlock(&grsec_exec_file_lock);
65811+ read_unlock(&tasklist_lock);
65812+ return 1;
65813+ }
65814+
65815+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65816+ if (!(gr_status & GR_READY)) {
65817+ read_unlock(&grsec_exec_file_lock);
65818+ read_unlock(&tasklist_lock);
65819+ return 0;
65820+ }
65821+#endif
65822+
65823+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
65824+ read_unlock(&grsec_exec_file_lock);
65825+ read_unlock(&tasklist_lock);
65826+
65827+ if (retmode & GR_NOPTRACE)
65828+ return 1;
65829+
65830+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
65831+ && (current->acl != task->acl || (current->acl != current->role->root_label
65832+ && task_pid_nr(current) != task_pid_nr(task))))
65833+ return 1;
65834+
65835+ return 0;
65836+}
65837+
65838+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
65839+{
65840+ if (unlikely(!(gr_status & GR_READY)))
65841+ return;
65842+
65843+ if (!(current->role->roletype & GR_ROLE_GOD))
65844+ return;
65845+
65846+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
65847+ p->role->rolename, gr_task_roletype_to_char(p),
65848+ p->acl->filename);
65849+}
65850+
65851+int
65852+gr_handle_ptrace(struct task_struct *task, const long request)
65853+{
65854+ struct task_struct *tmp = task;
65855+ struct task_struct *curtemp = current;
65856+ __u32 retmode;
65857+
65858+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65859+ if (unlikely(!(gr_status & GR_READY)))
65860+ return 0;
65861+#endif
65862+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65863+ read_lock(&tasklist_lock);
65864+ while (task_pid_nr(tmp) > 0) {
65865+ if (tmp == curtemp)
65866+ break;
65867+ tmp = tmp->real_parent;
65868+ }
65869+
65870+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
65871+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
65872+ read_unlock(&tasklist_lock);
65873+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
65874+ return 1;
65875+ }
65876+ read_unlock(&tasklist_lock);
65877+ }
65878+
65879+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65880+ if (!(gr_status & GR_READY))
65881+ return 0;
65882+#endif
65883+
65884+ read_lock(&grsec_exec_file_lock);
65885+ if (unlikely(!task->exec_file)) {
65886+ read_unlock(&grsec_exec_file_lock);
65887+ return 0;
65888+ }
65889+
65890+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
65891+ read_unlock(&grsec_exec_file_lock);
65892+
65893+ if (retmode & GR_NOPTRACE) {
65894+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
65895+ return 1;
65896+ }
65897+
65898+ if (retmode & GR_PTRACERD) {
65899+ switch (request) {
65900+ case PTRACE_SEIZE:
65901+ case PTRACE_POKETEXT:
65902+ case PTRACE_POKEDATA:
65903+ case PTRACE_POKEUSR:
65904+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
65905+ case PTRACE_SETREGS:
65906+ case PTRACE_SETFPREGS:
65907+#endif
65908+#ifdef CONFIG_X86
65909+ case PTRACE_SETFPXREGS:
65910+#endif
65911+#ifdef CONFIG_ALTIVEC
65912+ case PTRACE_SETVRREGS:
65913+#endif
65914+ return 1;
65915+ default:
65916+ return 0;
65917+ }
65918+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
65919+ !(current->role->roletype & GR_ROLE_GOD) &&
65920+ (current->acl != task->acl)) {
65921+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
65922+ return 1;
65923+ }
65924+
65925+ return 0;
65926+}
65927+
65928+static int is_writable_mmap(const struct file *filp)
65929+{
65930+ struct task_struct *task = current;
65931+ struct acl_object_label *obj, *obj2;
65932+
65933+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
65934+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
65935+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
65936+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
65937+ task->role->root_label);
65938+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
65939+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
65940+ return 1;
65941+ }
65942+ }
65943+ return 0;
65944+}
65945+
65946+int
65947+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
65948+{
65949+ __u32 mode;
65950+
65951+ if (unlikely(!file || !(prot & PROT_EXEC)))
65952+ return 1;
65953+
65954+ if (is_writable_mmap(file))
65955+ return 0;
65956+
65957+ mode =
65958+ gr_search_file(file->f_path.dentry,
65959+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
65960+ file->f_path.mnt);
65961+
65962+ if (!gr_tpe_allow(file))
65963+ return 0;
65964+
65965+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
65966+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
65967+ return 0;
65968+ } else if (unlikely(!(mode & GR_EXEC))) {
65969+ return 0;
65970+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
65971+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
65972+ return 1;
65973+ }
65974+
65975+ return 1;
65976+}
65977+
65978+int
65979+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
65980+{
65981+ __u32 mode;
65982+
65983+ if (unlikely(!file || !(prot & PROT_EXEC)))
65984+ return 1;
65985+
65986+ if (is_writable_mmap(file))
65987+ return 0;
65988+
65989+ mode =
65990+ gr_search_file(file->f_path.dentry,
65991+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
65992+ file->f_path.mnt);
65993+
65994+ if (!gr_tpe_allow(file))
65995+ return 0;
65996+
65997+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
65998+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
65999+ return 0;
66000+ } else if (unlikely(!(mode & GR_EXEC))) {
66001+ return 0;
66002+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66003+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66004+ return 1;
66005+ }
66006+
66007+ return 1;
66008+}
66009+
66010+void
66011+gr_acl_handle_psacct(struct task_struct *task, const long code)
66012+{
66013+ unsigned long runtime;
66014+ unsigned long cputime;
66015+ unsigned int wday, cday;
66016+ __u8 whr, chr;
66017+ __u8 wmin, cmin;
66018+ __u8 wsec, csec;
66019+ struct timespec timeval;
66020+
66021+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
66022+ !(task->acl->mode & GR_PROCACCT)))
66023+ return;
66024+
66025+ do_posix_clock_monotonic_gettime(&timeval);
66026+ runtime = timeval.tv_sec - task->start_time.tv_sec;
66027+ wday = runtime / (3600 * 24);
66028+ runtime -= wday * (3600 * 24);
66029+ whr = runtime / 3600;
66030+ runtime -= whr * 3600;
66031+ wmin = runtime / 60;
66032+ runtime -= wmin * 60;
66033+ wsec = runtime;
66034+
66035+ cputime = (task->utime + task->stime) / HZ;
66036+ cday = cputime / (3600 * 24);
66037+ cputime -= cday * (3600 * 24);
66038+ chr = cputime / 3600;
66039+ cputime -= chr * 3600;
66040+ cmin = cputime / 60;
66041+ cputime -= cmin * 60;
66042+ csec = cputime;
66043+
66044+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
66045+
66046+ return;
66047+}
66048+
66049+void gr_set_kernel_label(struct task_struct *task)
66050+{
66051+ if (gr_status & GR_READY) {
66052+ task->role = kernel_role;
66053+ task->acl = kernel_role->root_label;
66054+ }
66055+ return;
66056+}
66057+
66058+#ifdef CONFIG_TASKSTATS
66059+int gr_is_taskstats_denied(int pid)
66060+{
66061+ struct task_struct *task;
66062+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66063+ const struct cred *cred;
66064+#endif
66065+ int ret = 0;
66066+
66067+ /* restrict taskstats viewing to un-chrooted root users
66068+ who have the 'view' subject flag if the RBAC system is enabled
66069+ */
66070+
66071+ rcu_read_lock();
66072+ read_lock(&tasklist_lock);
66073+ task = find_task_by_vpid(pid);
66074+ if (task) {
66075+#ifdef CONFIG_GRKERNSEC_CHROOT
66076+ if (proc_is_chrooted(task))
66077+ ret = -EACCES;
66078+#endif
66079+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66080+ cred = __task_cred(task);
66081+#ifdef CONFIG_GRKERNSEC_PROC_USER
66082+ if (gr_is_global_nonroot(cred->uid))
66083+ ret = -EACCES;
66084+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66085+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
66086+ ret = -EACCES;
66087+#endif
66088+#endif
66089+ if (gr_status & GR_READY) {
66090+ if (!(task->acl->mode & GR_VIEW))
66091+ ret = -EACCES;
66092+ }
66093+ } else
66094+ ret = -ENOENT;
66095+
66096+ read_unlock(&tasklist_lock);
66097+ rcu_read_unlock();
66098+
66099+ return ret;
66100+}
66101+#endif
66102+
66103+/* AUXV entries are filled via a descendant of search_binary_handler
66104+ after we've already applied the subject for the target
66105+*/
66106+int gr_acl_enable_at_secure(void)
66107+{
66108+ if (unlikely(!(gr_status & GR_READY)))
66109+ return 0;
66110+
66111+ if (current->acl->mode & GR_ATSECURE)
66112+ return 1;
66113+
66114+ return 0;
66115+}
66116+
66117+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
66118+{
66119+ struct task_struct *task = current;
66120+ struct dentry *dentry = file->f_path.dentry;
66121+ struct vfsmount *mnt = file->f_path.mnt;
66122+ struct acl_object_label *obj, *tmp;
66123+ struct acl_subject_label *subj;
66124+ unsigned int bufsize;
66125+ int is_not_root;
66126+ char *path;
66127+ dev_t dev = __get_dev(dentry);
66128+
66129+ if (unlikely(!(gr_status & GR_READY)))
66130+ return 1;
66131+
66132+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
66133+ return 1;
66134+
66135+ /* ignore Eric Biederman */
66136+ if (IS_PRIVATE(dentry->d_inode))
66137+ return 1;
66138+
66139+ subj = task->acl;
66140+ read_lock(&gr_inode_lock);
66141+ do {
66142+ obj = lookup_acl_obj_label(ino, dev, subj);
66143+ if (obj != NULL) {
66144+ read_unlock(&gr_inode_lock);
66145+ return (obj->mode & GR_FIND) ? 1 : 0;
66146+ }
66147+ } while ((subj = subj->parent_subject));
66148+ read_unlock(&gr_inode_lock);
66149+
66150+ /* this is purely an optimization since we're looking for an object
66151+ for the directory we're doing a readdir on
66152+ if it's possible for any globbed object to match the entry we're
66153+ filling into the directory, then the object we find here will be
66154+ an anchor point with attached globbed objects
66155+ */
66156+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
66157+ if (obj->globbed == NULL)
66158+ return (obj->mode & GR_FIND) ? 1 : 0;
66159+
66160+ is_not_root = ((obj->filename[0] == '/') &&
66161+ (obj->filename[1] == '\0')) ? 0 : 1;
66162+ bufsize = PAGE_SIZE - namelen - is_not_root;
66163+
66164+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
66165+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
66166+ return 1;
66167+
66168+ preempt_disable();
66169+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
66170+ bufsize);
66171+
66172+ bufsize = strlen(path);
66173+
66174+ /* if base is "/", don't append an additional slash */
66175+ if (is_not_root)
66176+ *(path + bufsize) = '/';
66177+ memcpy(path + bufsize + is_not_root, name, namelen);
66178+ *(path + bufsize + namelen + is_not_root) = '\0';
66179+
66180+ tmp = obj->globbed;
66181+ while (tmp) {
66182+ if (!glob_match(tmp->filename, path)) {
66183+ preempt_enable();
66184+ return (tmp->mode & GR_FIND) ? 1 : 0;
66185+ }
66186+ tmp = tmp->next;
66187+ }
66188+ preempt_enable();
66189+ return (obj->mode & GR_FIND) ? 1 : 0;
66190+}
66191+
66192+void gr_put_exec_file(struct task_struct *task)
66193+{
66194+ struct file *filp;
66195+
66196+ write_lock(&grsec_exec_file_lock);
66197+ filp = task->exec_file;
66198+ task->exec_file = NULL;
66199+ write_unlock(&grsec_exec_file_lock);
66200+
66201+ if (filp)
66202+ fput(filp);
66203+
66204+ return;
66205+}
66206+
66207+
66208+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
66209+EXPORT_SYMBOL(gr_acl_is_enabled);
66210+#endif
66211+EXPORT_SYMBOL(gr_set_kernel_label);
66212+#ifdef CONFIG_SECURITY
66213+EXPORT_SYMBOL(gr_check_user_change);
66214+EXPORT_SYMBOL(gr_check_group_change);
66215+#endif
66216+
66217diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
66218new file mode 100644
66219index 0000000..34fefda
66220--- /dev/null
66221+++ b/grsecurity/gracl_alloc.c
66222@@ -0,0 +1,105 @@
66223+#include <linux/kernel.h>
66224+#include <linux/mm.h>
66225+#include <linux/slab.h>
66226+#include <linux/vmalloc.h>
66227+#include <linux/gracl.h>
66228+#include <linux/grsecurity.h>
66229+
66230+static unsigned long alloc_stack_next = 1;
66231+static unsigned long alloc_stack_size = 1;
66232+static void **alloc_stack;
66233+
66234+static __inline__ int
66235+alloc_pop(void)
66236+{
66237+ if (alloc_stack_next == 1)
66238+ return 0;
66239+
66240+ kfree(alloc_stack[alloc_stack_next - 2]);
66241+
66242+ alloc_stack_next--;
66243+
66244+ return 1;
66245+}
66246+
66247+static __inline__ int
66248+alloc_push(void *buf)
66249+{
66250+ if (alloc_stack_next >= alloc_stack_size)
66251+ return 1;
66252+
66253+ alloc_stack[alloc_stack_next - 1] = buf;
66254+
66255+ alloc_stack_next++;
66256+
66257+ return 0;
66258+}
66259+
66260+void *
66261+acl_alloc(unsigned long len)
66262+{
66263+ void *ret = NULL;
66264+
66265+ if (!len || len > PAGE_SIZE)
66266+ goto out;
66267+
66268+ ret = kmalloc(len, GFP_KERNEL);
66269+
66270+ if (ret) {
66271+ if (alloc_push(ret)) {
66272+ kfree(ret);
66273+ ret = NULL;
66274+ }
66275+ }
66276+
66277+out:
66278+ return ret;
66279+}
66280+
66281+void *
66282+acl_alloc_num(unsigned long num, unsigned long len)
66283+{
66284+ if (!len || (num > (PAGE_SIZE / len)))
66285+ return NULL;
66286+
66287+ return acl_alloc(num * len);
66288+}
66289+
66290+void
66291+acl_free_all(void)
66292+{
66293+ if (gr_acl_is_enabled() || !alloc_stack)
66294+ return;
66295+
66296+ while (alloc_pop()) ;
66297+
66298+ if (alloc_stack) {
66299+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
66300+ kfree(alloc_stack);
66301+ else
66302+ vfree(alloc_stack);
66303+ }
66304+
66305+ alloc_stack = NULL;
66306+ alloc_stack_size = 1;
66307+ alloc_stack_next = 1;
66308+
66309+ return;
66310+}
66311+
66312+int
66313+acl_alloc_stack_init(unsigned long size)
66314+{
66315+ if ((size * sizeof (void *)) <= PAGE_SIZE)
66316+ alloc_stack =
66317+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
66318+ else
66319+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
66320+
66321+ alloc_stack_size = size;
66322+
66323+ if (!alloc_stack)
66324+ return 0;
66325+ else
66326+ return 1;
66327+}
66328diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
66329new file mode 100644
66330index 0000000..bdd51ea
66331--- /dev/null
66332+++ b/grsecurity/gracl_cap.c
66333@@ -0,0 +1,110 @@
66334+#include <linux/kernel.h>
66335+#include <linux/module.h>
66336+#include <linux/sched.h>
66337+#include <linux/gracl.h>
66338+#include <linux/grsecurity.h>
66339+#include <linux/grinternal.h>
66340+
66341+extern const char *captab_log[];
66342+extern int captab_log_entries;
66343+
66344+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
66345+{
66346+ struct acl_subject_label *curracl;
66347+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66348+ kernel_cap_t cap_audit = __cap_empty_set;
66349+
66350+ if (!gr_acl_is_enabled())
66351+ return 1;
66352+
66353+ curracl = task->acl;
66354+
66355+ cap_drop = curracl->cap_lower;
66356+ cap_mask = curracl->cap_mask;
66357+ cap_audit = curracl->cap_invert_audit;
66358+
66359+ while ((curracl = curracl->parent_subject)) {
66360+ /* if the cap isn't specified in the current computed mask but is specified in the
66361+ current level subject, and is lowered in the current level subject, then add
66362+ it to the set of dropped capabilities
66363+ otherwise, add the current level subject's mask to the current computed mask
66364+ */
66365+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66366+ cap_raise(cap_mask, cap);
66367+ if (cap_raised(curracl->cap_lower, cap))
66368+ cap_raise(cap_drop, cap);
66369+ if (cap_raised(curracl->cap_invert_audit, cap))
66370+ cap_raise(cap_audit, cap);
66371+ }
66372+ }
66373+
66374+ if (!cap_raised(cap_drop, cap)) {
66375+ if (cap_raised(cap_audit, cap))
66376+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
66377+ return 1;
66378+ }
66379+
66380+ curracl = task->acl;
66381+
66382+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
66383+ && cap_raised(cred->cap_effective, cap)) {
66384+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
66385+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
66386+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
66387+ gr_to_filename(task->exec_file->f_path.dentry,
66388+ task->exec_file->f_path.mnt) : curracl->filename,
66389+ curracl->filename, 0UL,
66390+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
66391+ return 1;
66392+ }
66393+
66394+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
66395+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
66396+
66397+ return 0;
66398+}
66399+
66400+int
66401+gr_acl_is_capable(const int cap)
66402+{
66403+ return gr_task_acl_is_capable(current, current_cred(), cap);
66404+}
66405+
66406+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
66407+{
66408+ struct acl_subject_label *curracl;
66409+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66410+
66411+ if (!gr_acl_is_enabled())
66412+ return 1;
66413+
66414+ curracl = task->acl;
66415+
66416+ cap_drop = curracl->cap_lower;
66417+ cap_mask = curracl->cap_mask;
66418+
66419+ while ((curracl = curracl->parent_subject)) {
66420+ /* if the cap isn't specified in the current computed mask but is specified in the
66421+ current level subject, and is lowered in the current level subject, then add
66422+ it to the set of dropped capabilities
66423+ otherwise, add the current level subject's mask to the current computed mask
66424+ */
66425+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66426+ cap_raise(cap_mask, cap);
66427+ if (cap_raised(curracl->cap_lower, cap))
66428+ cap_raise(cap_drop, cap);
66429+ }
66430+ }
66431+
66432+ if (!cap_raised(cap_drop, cap))
66433+ return 1;
66434+
66435+ return 0;
66436+}
66437+
66438+int
66439+gr_acl_is_capable_nolog(const int cap)
66440+{
66441+ return gr_task_acl_is_capable_nolog(current, cap);
66442+}
66443+
66444diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
66445new file mode 100644
66446index 0000000..a43dd06
66447--- /dev/null
66448+++ b/grsecurity/gracl_compat.c
66449@@ -0,0 +1,269 @@
66450+#include <linux/kernel.h>
66451+#include <linux/gracl.h>
66452+#include <linux/compat.h>
66453+#include <linux/gracl_compat.h>
66454+
66455+#include <asm/uaccess.h>
66456+
66457+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
66458+{
66459+ struct gr_arg_wrapper_compat uwrapcompat;
66460+
66461+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
66462+ return -EFAULT;
66463+
66464+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
66465+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
66466+ return -EINVAL;
66467+
66468+ uwrap->arg = compat_ptr(uwrapcompat.arg);
66469+ uwrap->version = uwrapcompat.version;
66470+ uwrap->size = sizeof(struct gr_arg);
66471+
66472+ return 0;
66473+}
66474+
66475+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
66476+{
66477+ struct gr_arg_compat argcompat;
66478+
66479+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
66480+ return -EFAULT;
66481+
66482+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
66483+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
66484+ arg->role_db.num_roles = argcompat.role_db.num_roles;
66485+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
66486+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
66487+ arg->role_db.num_objects = argcompat.role_db.num_objects;
66488+
66489+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
66490+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
66491+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
66492+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
66493+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
66494+ arg->segv_device = argcompat.segv_device;
66495+ arg->segv_inode = argcompat.segv_inode;
66496+ arg->segv_uid = argcompat.segv_uid;
66497+ arg->num_sprole_pws = argcompat.num_sprole_pws;
66498+ arg->mode = argcompat.mode;
66499+
66500+ return 0;
66501+}
66502+
66503+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
66504+{
66505+ struct acl_object_label_compat objcompat;
66506+
66507+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
66508+ return -EFAULT;
66509+
66510+ obj->filename = compat_ptr(objcompat.filename);
66511+ obj->inode = objcompat.inode;
66512+ obj->device = objcompat.device;
66513+ obj->mode = objcompat.mode;
66514+
66515+ obj->nested = compat_ptr(objcompat.nested);
66516+ obj->globbed = compat_ptr(objcompat.globbed);
66517+
66518+ obj->prev = compat_ptr(objcompat.prev);
66519+ obj->next = compat_ptr(objcompat.next);
66520+
66521+ return 0;
66522+}
66523+
66524+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
66525+{
66526+ unsigned int i;
66527+ struct acl_subject_label_compat subjcompat;
66528+
66529+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
66530+ return -EFAULT;
66531+
66532+ subj->filename = compat_ptr(subjcompat.filename);
66533+ subj->inode = subjcompat.inode;
66534+ subj->device = subjcompat.device;
66535+ subj->mode = subjcompat.mode;
66536+ subj->cap_mask = subjcompat.cap_mask;
66537+ subj->cap_lower = subjcompat.cap_lower;
66538+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
66539+
66540+ for (i = 0; i < GR_NLIMITS; i++) {
66541+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
66542+ subj->res[i].rlim_cur = RLIM_INFINITY;
66543+ else
66544+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
66545+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
66546+ subj->res[i].rlim_max = RLIM_INFINITY;
66547+ else
66548+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
66549+ }
66550+ subj->resmask = subjcompat.resmask;
66551+
66552+ subj->user_trans_type = subjcompat.user_trans_type;
66553+ subj->group_trans_type = subjcompat.group_trans_type;
66554+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
66555+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
66556+ subj->user_trans_num = subjcompat.user_trans_num;
66557+ subj->group_trans_num = subjcompat.group_trans_num;
66558+
66559+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
66560+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
66561+ subj->ip_type = subjcompat.ip_type;
66562+ subj->ips = compat_ptr(subjcompat.ips);
66563+ subj->ip_num = subjcompat.ip_num;
66564+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
66565+
66566+ subj->crashes = subjcompat.crashes;
66567+ subj->expires = subjcompat.expires;
66568+
66569+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
66570+ subj->hash = compat_ptr(subjcompat.hash);
66571+ subj->prev = compat_ptr(subjcompat.prev);
66572+ subj->next = compat_ptr(subjcompat.next);
66573+
66574+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
66575+ subj->obj_hash_size = subjcompat.obj_hash_size;
66576+ subj->pax_flags = subjcompat.pax_flags;
66577+
66578+ return 0;
66579+}
66580+
66581+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
66582+{
66583+ struct acl_role_label_compat rolecompat;
66584+
66585+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
66586+ return -EFAULT;
66587+
66588+ role->rolename = compat_ptr(rolecompat.rolename);
66589+ role->uidgid = rolecompat.uidgid;
66590+ role->roletype = rolecompat.roletype;
66591+
66592+ role->auth_attempts = rolecompat.auth_attempts;
66593+ role->expires = rolecompat.expires;
66594+
66595+ role->root_label = compat_ptr(rolecompat.root_label);
66596+ role->hash = compat_ptr(rolecompat.hash);
66597+
66598+ role->prev = compat_ptr(rolecompat.prev);
66599+ role->next = compat_ptr(rolecompat.next);
66600+
66601+ role->transitions = compat_ptr(rolecompat.transitions);
66602+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
66603+ role->domain_children = compat_ptr(rolecompat.domain_children);
66604+ role->domain_child_num = rolecompat.domain_child_num;
66605+
66606+ role->umask = rolecompat.umask;
66607+
66608+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
66609+ role->subj_hash_size = rolecompat.subj_hash_size;
66610+
66611+ return 0;
66612+}
66613+
66614+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
66615+{
66616+ struct role_allowed_ip_compat roleip_compat;
66617+
66618+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
66619+ return -EFAULT;
66620+
66621+ roleip->addr = roleip_compat.addr;
66622+ roleip->netmask = roleip_compat.netmask;
66623+
66624+ roleip->prev = compat_ptr(roleip_compat.prev);
66625+ roleip->next = compat_ptr(roleip_compat.next);
66626+
66627+ return 0;
66628+}
66629+
66630+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
66631+{
66632+ struct role_transition_compat trans_compat;
66633+
66634+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
66635+ return -EFAULT;
66636+
66637+ trans->rolename = compat_ptr(trans_compat.rolename);
66638+
66639+ trans->prev = compat_ptr(trans_compat.prev);
66640+ trans->next = compat_ptr(trans_compat.next);
66641+
66642+ return 0;
66643+
66644+}
66645+
66646+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
66647+{
66648+ struct gr_hash_struct_compat hash_compat;
66649+
66650+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
66651+ return -EFAULT;
66652+
66653+ hash->table = compat_ptr(hash_compat.table);
66654+ hash->nametable = compat_ptr(hash_compat.nametable);
66655+ hash->first = compat_ptr(hash_compat.first);
66656+
66657+ hash->table_size = hash_compat.table_size;
66658+ hash->used_size = hash_compat.used_size;
66659+
66660+ hash->type = hash_compat.type;
66661+
66662+ return 0;
66663+}
66664+
66665+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
66666+{
66667+ compat_uptr_t ptrcompat;
66668+
66669+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
66670+ return -EFAULT;
66671+
66672+ *(void **)ptr = compat_ptr(ptrcompat);
66673+
66674+ return 0;
66675+}
66676+
66677+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
66678+{
66679+ struct acl_ip_label_compat ip_compat;
66680+
66681+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
66682+ return -EFAULT;
66683+
66684+ ip->iface = compat_ptr(ip_compat.iface);
66685+ ip->addr = ip_compat.addr;
66686+ ip->netmask = ip_compat.netmask;
66687+ ip->low = ip_compat.low;
66688+ ip->high = ip_compat.high;
66689+ ip->mode = ip_compat.mode;
66690+ ip->type = ip_compat.type;
66691+
66692+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
66693+
66694+ ip->prev = compat_ptr(ip_compat.prev);
66695+ ip->next = compat_ptr(ip_compat.next);
66696+
66697+ return 0;
66698+}
66699+
66700+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
66701+{
66702+ struct sprole_pw_compat pw_compat;
66703+
66704+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
66705+ return -EFAULT;
66706+
66707+ pw->rolename = compat_ptr(pw_compat.rolename);
66708+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
66709+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
66710+
66711+ return 0;
66712+}
66713+
66714+size_t get_gr_arg_wrapper_size_compat(void)
66715+{
66716+ return sizeof(struct gr_arg_wrapper_compat);
66717+}
66718+
66719diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
66720new file mode 100644
66721index 0000000..a340c17
66722--- /dev/null
66723+++ b/grsecurity/gracl_fs.c
66724@@ -0,0 +1,431 @@
66725+#include <linux/kernel.h>
66726+#include <linux/sched.h>
66727+#include <linux/types.h>
66728+#include <linux/fs.h>
66729+#include <linux/file.h>
66730+#include <linux/stat.h>
66731+#include <linux/grsecurity.h>
66732+#include <linux/grinternal.h>
66733+#include <linux/gracl.h>
66734+
66735+umode_t
66736+gr_acl_umask(void)
66737+{
66738+ if (unlikely(!gr_acl_is_enabled()))
66739+ return 0;
66740+
66741+ return current->role->umask;
66742+}
66743+
66744+__u32
66745+gr_acl_handle_hidden_file(const struct dentry * dentry,
66746+ const struct vfsmount * mnt)
66747+{
66748+ __u32 mode;
66749+
66750+ if (unlikely(!dentry->d_inode))
66751+ return GR_FIND;
66752+
66753+ mode =
66754+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
66755+
66756+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
66757+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66758+ return mode;
66759+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
66760+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66761+ return 0;
66762+ } else if (unlikely(!(mode & GR_FIND)))
66763+ return 0;
66764+
66765+ return GR_FIND;
66766+}
66767+
66768+__u32
66769+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
66770+ int acc_mode)
66771+{
66772+ __u32 reqmode = GR_FIND;
66773+ __u32 mode;
66774+
66775+ if (unlikely(!dentry->d_inode))
66776+ return reqmode;
66777+
66778+ if (acc_mode & MAY_APPEND)
66779+ reqmode |= GR_APPEND;
66780+ else if (acc_mode & MAY_WRITE)
66781+ reqmode |= GR_WRITE;
66782+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
66783+ reqmode |= GR_READ;
66784+
66785+ mode =
66786+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66787+ mnt);
66788+
66789+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66790+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66791+ reqmode & GR_READ ? " reading" : "",
66792+ reqmode & GR_WRITE ? " writing" : reqmode &
66793+ GR_APPEND ? " appending" : "");
66794+ return reqmode;
66795+ } else
66796+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66797+ {
66798+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66799+ reqmode & GR_READ ? " reading" : "",
66800+ reqmode & GR_WRITE ? " writing" : reqmode &
66801+ GR_APPEND ? " appending" : "");
66802+ return 0;
66803+ } else if (unlikely((mode & reqmode) != reqmode))
66804+ return 0;
66805+
66806+ return reqmode;
66807+}
66808+
66809+__u32
66810+gr_acl_handle_creat(const struct dentry * dentry,
66811+ const struct dentry * p_dentry,
66812+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
66813+ const int imode)
66814+{
66815+ __u32 reqmode = GR_WRITE | GR_CREATE;
66816+ __u32 mode;
66817+
66818+ if (acc_mode & MAY_APPEND)
66819+ reqmode |= GR_APPEND;
66820+ // if a directory was required or the directory already exists, then
66821+ // don't count this open as a read
66822+ if ((acc_mode & MAY_READ) &&
66823+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
66824+ reqmode |= GR_READ;
66825+ if ((open_flags & O_CREAT) &&
66826+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
66827+ reqmode |= GR_SETID;
66828+
66829+ mode =
66830+ gr_check_create(dentry, p_dentry, p_mnt,
66831+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
66832+
66833+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66834+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66835+ reqmode & GR_READ ? " reading" : "",
66836+ reqmode & GR_WRITE ? " writing" : reqmode &
66837+ GR_APPEND ? " appending" : "");
66838+ return reqmode;
66839+ } else
66840+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66841+ {
66842+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66843+ reqmode & GR_READ ? " reading" : "",
66844+ reqmode & GR_WRITE ? " writing" : reqmode &
66845+ GR_APPEND ? " appending" : "");
66846+ return 0;
66847+ } else if (unlikely((mode & reqmode) != reqmode))
66848+ return 0;
66849+
66850+ return reqmode;
66851+}
66852+
66853+__u32
66854+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
66855+ const int fmode)
66856+{
66857+ __u32 mode, reqmode = GR_FIND;
66858+
66859+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
66860+ reqmode |= GR_EXEC;
66861+ if (fmode & S_IWOTH)
66862+ reqmode |= GR_WRITE;
66863+ if (fmode & S_IROTH)
66864+ reqmode |= GR_READ;
66865+
66866+ mode =
66867+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66868+ mnt);
66869+
66870+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66871+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
66872+ reqmode & GR_READ ? " reading" : "",
66873+ reqmode & GR_WRITE ? " writing" : "",
66874+ reqmode & GR_EXEC ? " executing" : "");
66875+ return reqmode;
66876+ } else
66877+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66878+ {
66879+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
66880+ reqmode & GR_READ ? " reading" : "",
66881+ reqmode & GR_WRITE ? " writing" : "",
66882+ reqmode & GR_EXEC ? " executing" : "");
66883+ return 0;
66884+ } else if (unlikely((mode & reqmode) != reqmode))
66885+ return 0;
66886+
66887+ return reqmode;
66888+}
66889+
66890+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
66891+{
66892+ __u32 mode;
66893+
66894+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
66895+
66896+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
66897+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
66898+ return mode;
66899+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
66900+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
66901+ return 0;
66902+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
66903+ return 0;
66904+
66905+ return (reqmode);
66906+}
66907+
66908+__u32
66909+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
66910+{
66911+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
66912+}
66913+
66914+__u32
66915+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
66916+{
66917+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
66918+}
66919+
66920+__u32
66921+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
66922+{
66923+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
66924+}
66925+
66926+__u32
66927+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
66928+{
66929+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
66930+}
66931+
66932+__u32
66933+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
66934+ umode_t *modeptr)
66935+{
66936+ umode_t mode;
66937+
66938+ *modeptr &= ~gr_acl_umask();
66939+ mode = *modeptr;
66940+
66941+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
66942+ return 1;
66943+
66944+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
66945+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
66946+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
66947+ GR_CHMOD_ACL_MSG);
66948+ } else {
66949+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
66950+ }
66951+}
66952+
66953+__u32
66954+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
66955+{
66956+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
66957+}
66958+
66959+__u32
66960+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
66961+{
66962+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
66963+}
66964+
66965+__u32
66966+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
66967+{
66968+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
66969+}
66970+
66971+__u32
66972+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
66973+{
66974+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
66975+ GR_UNIXCONNECT_ACL_MSG);
66976+}
66977+
66978+/* hardlinks require at minimum create and link permission,
66979+ any additional privilege required is based on the
66980+ privilege of the file being linked to
66981+*/
66982+__u32
66983+gr_acl_handle_link(const struct dentry * new_dentry,
66984+ const struct dentry * parent_dentry,
66985+ const struct vfsmount * parent_mnt,
66986+ const struct dentry * old_dentry,
66987+ const struct vfsmount * old_mnt, const struct filename *to)
66988+{
66989+ __u32 mode;
66990+ __u32 needmode = GR_CREATE | GR_LINK;
66991+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
66992+
66993+ mode =
66994+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
66995+ old_mnt);
66996+
66997+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
66998+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
66999+ return mode;
67000+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67001+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67002+ return 0;
67003+ } else if (unlikely((mode & needmode) != needmode))
67004+ return 0;
67005+
67006+ return 1;
67007+}
67008+
67009+__u32
67010+gr_acl_handle_symlink(const struct dentry * new_dentry,
67011+ const struct dentry * parent_dentry,
67012+ const struct vfsmount * parent_mnt, const struct filename *from)
67013+{
67014+ __u32 needmode = GR_WRITE | GR_CREATE;
67015+ __u32 mode;
67016+
67017+ mode =
67018+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
67019+ GR_CREATE | GR_AUDIT_CREATE |
67020+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
67021+
67022+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
67023+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67024+ return mode;
67025+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67026+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67027+ return 0;
67028+ } else if (unlikely((mode & needmode) != needmode))
67029+ return 0;
67030+
67031+ return (GR_WRITE | GR_CREATE);
67032+}
67033+
67034+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
67035+{
67036+ __u32 mode;
67037+
67038+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
67039+
67040+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
67041+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
67042+ return mode;
67043+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
67044+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
67045+ return 0;
67046+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
67047+ return 0;
67048+
67049+ return (reqmode);
67050+}
67051+
67052+__u32
67053+gr_acl_handle_mknod(const struct dentry * new_dentry,
67054+ const struct dentry * parent_dentry,
67055+ const struct vfsmount * parent_mnt,
67056+ const int mode)
67057+{
67058+ __u32 reqmode = GR_WRITE | GR_CREATE;
67059+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
67060+ reqmode |= GR_SETID;
67061+
67062+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67063+ reqmode, GR_MKNOD_ACL_MSG);
67064+}
67065+
67066+__u32
67067+gr_acl_handle_mkdir(const struct dentry *new_dentry,
67068+ const struct dentry *parent_dentry,
67069+ const struct vfsmount *parent_mnt)
67070+{
67071+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67072+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
67073+}
67074+
67075+#define RENAME_CHECK_SUCCESS(old, new) \
67076+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
67077+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
67078+
67079+int
67080+gr_acl_handle_rename(struct dentry *new_dentry,
67081+ struct dentry *parent_dentry,
67082+ const struct vfsmount *parent_mnt,
67083+ struct dentry *old_dentry,
67084+ struct inode *old_parent_inode,
67085+ struct vfsmount *old_mnt, const struct filename *newname)
67086+{
67087+ __u32 comp1, comp2;
67088+ int error = 0;
67089+
67090+ if (unlikely(!gr_acl_is_enabled()))
67091+ return 0;
67092+
67093+ if (!new_dentry->d_inode) {
67094+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
67095+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
67096+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
67097+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
67098+ GR_DELETE | GR_AUDIT_DELETE |
67099+ GR_AUDIT_READ | GR_AUDIT_WRITE |
67100+ GR_SUPPRESS, old_mnt);
67101+ } else {
67102+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
67103+ GR_CREATE | GR_DELETE |
67104+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
67105+ GR_AUDIT_READ | GR_AUDIT_WRITE |
67106+ GR_SUPPRESS, parent_mnt);
67107+ comp2 =
67108+ gr_search_file(old_dentry,
67109+ GR_READ | GR_WRITE | GR_AUDIT_READ |
67110+ GR_DELETE | GR_AUDIT_DELETE |
67111+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
67112+ }
67113+
67114+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
67115+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
67116+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67117+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
67118+ && !(comp2 & GR_SUPPRESS)) {
67119+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67120+ error = -EACCES;
67121+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
67122+ error = -EACCES;
67123+
67124+ return error;
67125+}
67126+
67127+void
67128+gr_acl_handle_exit(void)
67129+{
67130+ u16 id;
67131+ char *rolename;
67132+
67133+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
67134+ !(current->role->roletype & GR_ROLE_PERSIST))) {
67135+ id = current->acl_role_id;
67136+ rolename = current->role->rolename;
67137+ gr_set_acls(1);
67138+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
67139+ }
67140+
67141+ gr_put_exec_file(current);
67142+ return;
67143+}
67144+
67145+int
67146+gr_acl_handle_procpidmem(const struct task_struct *task)
67147+{
67148+ if (unlikely(!gr_acl_is_enabled()))
67149+ return 0;
67150+
67151+ if (task != current && task->acl->mode & GR_PROTPROCFD)
67152+ return -EACCES;
67153+
67154+ return 0;
67155+}
67156diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
67157new file mode 100644
67158index 0000000..8132048
67159--- /dev/null
67160+++ b/grsecurity/gracl_ip.c
67161@@ -0,0 +1,387 @@
67162+#include <linux/kernel.h>
67163+#include <asm/uaccess.h>
67164+#include <asm/errno.h>
67165+#include <net/sock.h>
67166+#include <linux/file.h>
67167+#include <linux/fs.h>
67168+#include <linux/net.h>
67169+#include <linux/in.h>
67170+#include <linux/skbuff.h>
67171+#include <linux/ip.h>
67172+#include <linux/udp.h>
67173+#include <linux/types.h>
67174+#include <linux/sched.h>
67175+#include <linux/netdevice.h>
67176+#include <linux/inetdevice.h>
67177+#include <linux/gracl.h>
67178+#include <linux/grsecurity.h>
67179+#include <linux/grinternal.h>
67180+
67181+#define GR_BIND 0x01
67182+#define GR_CONNECT 0x02
67183+#define GR_INVERT 0x04
67184+#define GR_BINDOVERRIDE 0x08
67185+#define GR_CONNECTOVERRIDE 0x10
67186+#define GR_SOCK_FAMILY 0x20
67187+
67188+static const char * gr_protocols[IPPROTO_MAX] = {
67189+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
67190+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
67191+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
67192+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
67193+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
67194+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
67195+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
67196+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
67197+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
67198+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
67199+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
67200+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
67201+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
67202+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
67203+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
67204+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
67205+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
67206+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
67207+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
67208+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
67209+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
67210+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
67211+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
67212+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
67213+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
67214+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
67215+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
67216+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
67217+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
67218+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
67219+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
67220+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
67221+ };
67222+
67223+static const char * gr_socktypes[SOCK_MAX] = {
67224+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
67225+ "unknown:7", "unknown:8", "unknown:9", "packet"
67226+ };
67227+
67228+static const char * gr_sockfamilies[AF_MAX+1] = {
67229+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
67230+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
67231+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
67232+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
67233+ };
67234+
67235+const char *
67236+gr_proto_to_name(unsigned char proto)
67237+{
67238+ return gr_protocols[proto];
67239+}
67240+
67241+const char *
67242+gr_socktype_to_name(unsigned char type)
67243+{
67244+ return gr_socktypes[type];
67245+}
67246+
67247+const char *
67248+gr_sockfamily_to_name(unsigned char family)
67249+{
67250+ return gr_sockfamilies[family];
67251+}
67252+
67253+int
67254+gr_search_socket(const int domain, const int type, const int protocol)
67255+{
67256+ struct acl_subject_label *curr;
67257+ const struct cred *cred = current_cred();
67258+
67259+ if (unlikely(!gr_acl_is_enabled()))
67260+ goto exit;
67261+
67262+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
67263+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
67264+ goto exit; // let the kernel handle it
67265+
67266+ curr = current->acl;
67267+
67268+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
67269+ /* the family is allowed, if this is PF_INET allow it only if
67270+ the extra sock type/protocol checks pass */
67271+ if (domain == PF_INET)
67272+ goto inet_check;
67273+ goto exit;
67274+ } else {
67275+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67276+ __u32 fakeip = 0;
67277+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67278+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67279+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67280+ gr_to_filename(current->exec_file->f_path.dentry,
67281+ current->exec_file->f_path.mnt) :
67282+ curr->filename, curr->filename,
67283+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
67284+ &current->signal->saved_ip);
67285+ goto exit;
67286+ }
67287+ goto exit_fail;
67288+ }
67289+
67290+inet_check:
67291+ /* the rest of this checking is for IPv4 only */
67292+ if (!curr->ips)
67293+ goto exit;
67294+
67295+ if ((curr->ip_type & (1U << type)) &&
67296+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
67297+ goto exit;
67298+
67299+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67300+ /* we don't place acls on raw sockets , and sometimes
67301+ dgram/ip sockets are opened for ioctl and not
67302+ bind/connect, so we'll fake a bind learn log */
67303+ if (type == SOCK_RAW || type == SOCK_PACKET) {
67304+ __u32 fakeip = 0;
67305+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67306+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67307+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67308+ gr_to_filename(current->exec_file->f_path.dentry,
67309+ current->exec_file->f_path.mnt) :
67310+ curr->filename, curr->filename,
67311+ &fakeip, 0, type,
67312+ protocol, GR_CONNECT, &current->signal->saved_ip);
67313+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
67314+ __u32 fakeip = 0;
67315+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67316+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67317+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67318+ gr_to_filename(current->exec_file->f_path.dentry,
67319+ current->exec_file->f_path.mnt) :
67320+ curr->filename, curr->filename,
67321+ &fakeip, 0, type,
67322+ protocol, GR_BIND, &current->signal->saved_ip);
67323+ }
67324+ /* we'll log when they use connect or bind */
67325+ goto exit;
67326+ }
67327+
67328+exit_fail:
67329+ if (domain == PF_INET)
67330+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
67331+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
67332+ else
67333+#ifndef CONFIG_IPV6
67334+ if (domain != PF_INET6)
67335+#endif
67336+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
67337+ gr_socktype_to_name(type), protocol);
67338+
67339+ return 0;
67340+exit:
67341+ return 1;
67342+}
67343+
67344+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
67345+{
67346+ if ((ip->mode & mode) &&
67347+ (ip_port >= ip->low) &&
67348+ (ip_port <= ip->high) &&
67349+ ((ntohl(ip_addr) & our_netmask) ==
67350+ (ntohl(our_addr) & our_netmask))
67351+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
67352+ && (ip->type & (1U << type))) {
67353+ if (ip->mode & GR_INVERT)
67354+ return 2; // specifically denied
67355+ else
67356+ return 1; // allowed
67357+ }
67358+
67359+ return 0; // not specifically allowed, may continue parsing
67360+}
67361+
67362+static int
67363+gr_search_connectbind(const int full_mode, struct sock *sk,
67364+ struct sockaddr_in *addr, const int type)
67365+{
67366+ char iface[IFNAMSIZ] = {0};
67367+ struct acl_subject_label *curr;
67368+ struct acl_ip_label *ip;
67369+ struct inet_sock *isk;
67370+ struct net_device *dev;
67371+ struct in_device *idev;
67372+ unsigned long i;
67373+ int ret;
67374+ int mode = full_mode & (GR_BIND | GR_CONNECT);
67375+ __u32 ip_addr = 0;
67376+ __u32 our_addr;
67377+ __u32 our_netmask;
67378+ char *p;
67379+ __u16 ip_port = 0;
67380+ const struct cred *cred = current_cred();
67381+
67382+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
67383+ return 0;
67384+
67385+ curr = current->acl;
67386+ isk = inet_sk(sk);
67387+
67388+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
67389+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
67390+ addr->sin_addr.s_addr = curr->inaddr_any_override;
67391+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
67392+ struct sockaddr_in saddr;
67393+ int err;
67394+
67395+ saddr.sin_family = AF_INET;
67396+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
67397+ saddr.sin_port = isk->inet_sport;
67398+
67399+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67400+ if (err)
67401+ return err;
67402+
67403+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67404+ if (err)
67405+ return err;
67406+ }
67407+
67408+ if (!curr->ips)
67409+ return 0;
67410+
67411+ ip_addr = addr->sin_addr.s_addr;
67412+ ip_port = ntohs(addr->sin_port);
67413+
67414+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67415+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67416+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67417+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67418+ gr_to_filename(current->exec_file->f_path.dentry,
67419+ current->exec_file->f_path.mnt) :
67420+ curr->filename, curr->filename,
67421+ &ip_addr, ip_port, type,
67422+ sk->sk_protocol, mode, &current->signal->saved_ip);
67423+ return 0;
67424+ }
67425+
67426+ for (i = 0; i < curr->ip_num; i++) {
67427+ ip = *(curr->ips + i);
67428+ if (ip->iface != NULL) {
67429+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
67430+ p = strchr(iface, ':');
67431+ if (p != NULL)
67432+ *p = '\0';
67433+ dev = dev_get_by_name(sock_net(sk), iface);
67434+ if (dev == NULL)
67435+ continue;
67436+ idev = in_dev_get(dev);
67437+ if (idev == NULL) {
67438+ dev_put(dev);
67439+ continue;
67440+ }
67441+ rcu_read_lock();
67442+ for_ifa(idev) {
67443+ if (!strcmp(ip->iface, ifa->ifa_label)) {
67444+ our_addr = ifa->ifa_address;
67445+ our_netmask = 0xffffffff;
67446+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67447+ if (ret == 1) {
67448+ rcu_read_unlock();
67449+ in_dev_put(idev);
67450+ dev_put(dev);
67451+ return 0;
67452+ } else if (ret == 2) {
67453+ rcu_read_unlock();
67454+ in_dev_put(idev);
67455+ dev_put(dev);
67456+ goto denied;
67457+ }
67458+ }
67459+ } endfor_ifa(idev);
67460+ rcu_read_unlock();
67461+ in_dev_put(idev);
67462+ dev_put(dev);
67463+ } else {
67464+ our_addr = ip->addr;
67465+ our_netmask = ip->netmask;
67466+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67467+ if (ret == 1)
67468+ return 0;
67469+ else if (ret == 2)
67470+ goto denied;
67471+ }
67472+ }
67473+
67474+denied:
67475+ if (mode == GR_BIND)
67476+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67477+ else if (mode == GR_CONNECT)
67478+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67479+
67480+ return -EACCES;
67481+}
67482+
67483+int
67484+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
67485+{
67486+ /* always allow disconnection of dgram sockets with connect */
67487+ if (addr->sin_family == AF_UNSPEC)
67488+ return 0;
67489+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
67490+}
67491+
67492+int
67493+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
67494+{
67495+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
67496+}
67497+
67498+int gr_search_listen(struct socket *sock)
67499+{
67500+ struct sock *sk = sock->sk;
67501+ struct sockaddr_in addr;
67502+
67503+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67504+ addr.sin_port = inet_sk(sk)->inet_sport;
67505+
67506+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67507+}
67508+
67509+int gr_search_accept(struct socket *sock)
67510+{
67511+ struct sock *sk = sock->sk;
67512+ struct sockaddr_in addr;
67513+
67514+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67515+ addr.sin_port = inet_sk(sk)->inet_sport;
67516+
67517+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67518+}
67519+
67520+int
67521+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
67522+{
67523+ if (addr)
67524+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
67525+ else {
67526+ struct sockaddr_in sin;
67527+ const struct inet_sock *inet = inet_sk(sk);
67528+
67529+ sin.sin_addr.s_addr = inet->inet_daddr;
67530+ sin.sin_port = inet->inet_dport;
67531+
67532+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67533+ }
67534+}
67535+
67536+int
67537+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
67538+{
67539+ struct sockaddr_in sin;
67540+
67541+ if (unlikely(skb->len < sizeof (struct udphdr)))
67542+ return 0; // skip this packet
67543+
67544+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
67545+ sin.sin_port = udp_hdr(skb)->source;
67546+
67547+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67548+}
67549diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
67550new file mode 100644
67551index 0000000..25f54ef
67552--- /dev/null
67553+++ b/grsecurity/gracl_learn.c
67554@@ -0,0 +1,207 @@
67555+#include <linux/kernel.h>
67556+#include <linux/mm.h>
67557+#include <linux/sched.h>
67558+#include <linux/poll.h>
67559+#include <linux/string.h>
67560+#include <linux/file.h>
67561+#include <linux/types.h>
67562+#include <linux/vmalloc.h>
67563+#include <linux/grinternal.h>
67564+
67565+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
67566+ size_t count, loff_t *ppos);
67567+extern int gr_acl_is_enabled(void);
67568+
67569+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
67570+static int gr_learn_attached;
67571+
67572+/* use a 512k buffer */
67573+#define LEARN_BUFFER_SIZE (512 * 1024)
67574+
67575+static DEFINE_SPINLOCK(gr_learn_lock);
67576+static DEFINE_MUTEX(gr_learn_user_mutex);
67577+
67578+/* we need to maintain two buffers, so that the kernel context of grlearn
67579+ uses a semaphore around the userspace copying, and the other kernel contexts
67580+ use a spinlock when copying into the buffer, since they cannot sleep
67581+*/
67582+static char *learn_buffer;
67583+static char *learn_buffer_user;
67584+static int learn_buffer_len;
67585+static int learn_buffer_user_len;
67586+
67587+static ssize_t
67588+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
67589+{
67590+ DECLARE_WAITQUEUE(wait, current);
67591+ ssize_t retval = 0;
67592+
67593+ add_wait_queue(&learn_wait, &wait);
67594+ set_current_state(TASK_INTERRUPTIBLE);
67595+ do {
67596+ mutex_lock(&gr_learn_user_mutex);
67597+ spin_lock(&gr_learn_lock);
67598+ if (learn_buffer_len)
67599+ break;
67600+ spin_unlock(&gr_learn_lock);
67601+ mutex_unlock(&gr_learn_user_mutex);
67602+ if (file->f_flags & O_NONBLOCK) {
67603+ retval = -EAGAIN;
67604+ goto out;
67605+ }
67606+ if (signal_pending(current)) {
67607+ retval = -ERESTARTSYS;
67608+ goto out;
67609+ }
67610+
67611+ schedule();
67612+ } while (1);
67613+
67614+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
67615+ learn_buffer_user_len = learn_buffer_len;
67616+ retval = learn_buffer_len;
67617+ learn_buffer_len = 0;
67618+
67619+ spin_unlock(&gr_learn_lock);
67620+
67621+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
67622+ retval = -EFAULT;
67623+
67624+ mutex_unlock(&gr_learn_user_mutex);
67625+out:
67626+ set_current_state(TASK_RUNNING);
67627+ remove_wait_queue(&learn_wait, &wait);
67628+ return retval;
67629+}
67630+
67631+static unsigned int
67632+poll_learn(struct file * file, poll_table * wait)
67633+{
67634+ poll_wait(file, &learn_wait, wait);
67635+
67636+ if (learn_buffer_len)
67637+ return (POLLIN | POLLRDNORM);
67638+
67639+ return 0;
67640+}
67641+
67642+void
67643+gr_clear_learn_entries(void)
67644+{
67645+ char *tmp;
67646+
67647+ mutex_lock(&gr_learn_user_mutex);
67648+ spin_lock(&gr_learn_lock);
67649+ tmp = learn_buffer;
67650+ learn_buffer = NULL;
67651+ spin_unlock(&gr_learn_lock);
67652+ if (tmp)
67653+ vfree(tmp);
67654+ if (learn_buffer_user != NULL) {
67655+ vfree(learn_buffer_user);
67656+ learn_buffer_user = NULL;
67657+ }
67658+ learn_buffer_len = 0;
67659+ mutex_unlock(&gr_learn_user_mutex);
67660+
67661+ return;
67662+}
67663+
67664+void
67665+gr_add_learn_entry(const char *fmt, ...)
67666+{
67667+ va_list args;
67668+ unsigned int len;
67669+
67670+ if (!gr_learn_attached)
67671+ return;
67672+
67673+ spin_lock(&gr_learn_lock);
67674+
67675+ /* leave a gap at the end so we know when it's "full" but don't have to
67676+ compute the exact length of the string we're trying to append
67677+ */
67678+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
67679+ spin_unlock(&gr_learn_lock);
67680+ wake_up_interruptible(&learn_wait);
67681+ return;
67682+ }
67683+ if (learn_buffer == NULL) {
67684+ spin_unlock(&gr_learn_lock);
67685+ return;
67686+ }
67687+
67688+ va_start(args, fmt);
67689+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
67690+ va_end(args);
67691+
67692+ learn_buffer_len += len + 1;
67693+
67694+ spin_unlock(&gr_learn_lock);
67695+ wake_up_interruptible(&learn_wait);
67696+
67697+ return;
67698+}
67699+
67700+static int
67701+open_learn(struct inode *inode, struct file *file)
67702+{
67703+ if (file->f_mode & FMODE_READ && gr_learn_attached)
67704+ return -EBUSY;
67705+ if (file->f_mode & FMODE_READ) {
67706+ int retval = 0;
67707+ mutex_lock(&gr_learn_user_mutex);
67708+ if (learn_buffer == NULL)
67709+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
67710+ if (learn_buffer_user == NULL)
67711+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
67712+ if (learn_buffer == NULL) {
67713+ retval = -ENOMEM;
67714+ goto out_error;
67715+ }
67716+ if (learn_buffer_user == NULL) {
67717+ retval = -ENOMEM;
67718+ goto out_error;
67719+ }
67720+ learn_buffer_len = 0;
67721+ learn_buffer_user_len = 0;
67722+ gr_learn_attached = 1;
67723+out_error:
67724+ mutex_unlock(&gr_learn_user_mutex);
67725+ return retval;
67726+ }
67727+ return 0;
67728+}
67729+
67730+static int
67731+close_learn(struct inode *inode, struct file *file)
67732+{
67733+ if (file->f_mode & FMODE_READ) {
67734+ char *tmp = NULL;
67735+ mutex_lock(&gr_learn_user_mutex);
67736+ spin_lock(&gr_learn_lock);
67737+ tmp = learn_buffer;
67738+ learn_buffer = NULL;
67739+ spin_unlock(&gr_learn_lock);
67740+ if (tmp)
67741+ vfree(tmp);
67742+ if (learn_buffer_user != NULL) {
67743+ vfree(learn_buffer_user);
67744+ learn_buffer_user = NULL;
67745+ }
67746+ learn_buffer_len = 0;
67747+ learn_buffer_user_len = 0;
67748+ gr_learn_attached = 0;
67749+ mutex_unlock(&gr_learn_user_mutex);
67750+ }
67751+
67752+ return 0;
67753+}
67754+
67755+const struct file_operations grsec_fops = {
67756+ .read = read_learn,
67757+ .write = write_grsec_handler,
67758+ .open = open_learn,
67759+ .release = close_learn,
67760+ .poll = poll_learn,
67761+};
67762diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
67763new file mode 100644
67764index 0000000..39645c9
67765--- /dev/null
67766+++ b/grsecurity/gracl_res.c
67767@@ -0,0 +1,68 @@
67768+#include <linux/kernel.h>
67769+#include <linux/sched.h>
67770+#include <linux/gracl.h>
67771+#include <linux/grinternal.h>
67772+
67773+static const char *restab_log[] = {
67774+ [RLIMIT_CPU] = "RLIMIT_CPU",
67775+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
67776+ [RLIMIT_DATA] = "RLIMIT_DATA",
67777+ [RLIMIT_STACK] = "RLIMIT_STACK",
67778+ [RLIMIT_CORE] = "RLIMIT_CORE",
67779+ [RLIMIT_RSS] = "RLIMIT_RSS",
67780+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
67781+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
67782+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
67783+ [RLIMIT_AS] = "RLIMIT_AS",
67784+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
67785+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
67786+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
67787+ [RLIMIT_NICE] = "RLIMIT_NICE",
67788+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
67789+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
67790+ [GR_CRASH_RES] = "RLIMIT_CRASH"
67791+};
67792+
67793+void
67794+gr_log_resource(const struct task_struct *task,
67795+ const int res, const unsigned long wanted, const int gt)
67796+{
67797+ const struct cred *cred;
67798+ unsigned long rlim;
67799+
67800+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
67801+ return;
67802+
67803+ // not yet supported resource
67804+ if (unlikely(!restab_log[res]))
67805+ return;
67806+
67807+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
67808+ rlim = task_rlimit_max(task, res);
67809+ else
67810+ rlim = task_rlimit(task, res);
67811+
67812+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
67813+ return;
67814+
67815+ rcu_read_lock();
67816+ cred = __task_cred(task);
67817+
67818+ if (res == RLIMIT_NPROC &&
67819+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
67820+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
67821+ goto out_rcu_unlock;
67822+ else if (res == RLIMIT_MEMLOCK &&
67823+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
67824+ goto out_rcu_unlock;
67825+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
67826+ goto out_rcu_unlock;
67827+ rcu_read_unlock();
67828+
67829+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
67830+
67831+ return;
67832+out_rcu_unlock:
67833+ rcu_read_unlock();
67834+ return;
67835+}
67836diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
67837new file mode 100644
67838index 0000000..3c38bfe
67839--- /dev/null
67840+++ b/grsecurity/gracl_segv.c
67841@@ -0,0 +1,305 @@
67842+#include <linux/kernel.h>
67843+#include <linux/mm.h>
67844+#include <asm/uaccess.h>
67845+#include <asm/errno.h>
67846+#include <asm/mman.h>
67847+#include <net/sock.h>
67848+#include <linux/file.h>
67849+#include <linux/fs.h>
67850+#include <linux/net.h>
67851+#include <linux/in.h>
67852+#include <linux/slab.h>
67853+#include <linux/types.h>
67854+#include <linux/sched.h>
67855+#include <linux/timer.h>
67856+#include <linux/gracl.h>
67857+#include <linux/grsecurity.h>
67858+#include <linux/grinternal.h>
67859+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
67860+#include <linux/magic.h>
67861+#include <linux/pagemap.h>
67862+#include "../fs/btrfs/async-thread.h"
67863+#include "../fs/btrfs/ctree.h"
67864+#include "../fs/btrfs/btrfs_inode.h"
67865+#endif
67866+
67867+static struct crash_uid *uid_set;
67868+static unsigned short uid_used;
67869+static DEFINE_SPINLOCK(gr_uid_lock);
67870+extern rwlock_t gr_inode_lock;
67871+extern struct acl_subject_label *
67872+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
67873+ struct acl_role_label *role);
67874+
67875+static inline dev_t __get_dev(const struct dentry *dentry)
67876+{
67877+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
67878+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
67879+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
67880+ else
67881+#endif
67882+ return dentry->d_sb->s_dev;
67883+}
67884+
67885+int
67886+gr_init_uidset(void)
67887+{
67888+ uid_set =
67889+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
67890+ uid_used = 0;
67891+
67892+ return uid_set ? 1 : 0;
67893+}
67894+
67895+void
67896+gr_free_uidset(void)
67897+{
67898+ if (uid_set)
67899+ kfree(uid_set);
67900+
67901+ return;
67902+}
67903+
67904+int
67905+gr_find_uid(const uid_t uid)
67906+{
67907+ struct crash_uid *tmp = uid_set;
67908+ uid_t buid;
67909+ int low = 0, high = uid_used - 1, mid;
67910+
67911+ while (high >= low) {
67912+ mid = (low + high) >> 1;
67913+ buid = tmp[mid].uid;
67914+ if (buid == uid)
67915+ return mid;
67916+ if (buid > uid)
67917+ high = mid - 1;
67918+ if (buid < uid)
67919+ low = mid + 1;
67920+ }
67921+
67922+ return -1;
67923+}
67924+
67925+static __inline__ void
67926+gr_insertsort(void)
67927+{
67928+ unsigned short i, j;
67929+ struct crash_uid index;
67930+
67931+ for (i = 1; i < uid_used; i++) {
67932+ index = uid_set[i];
67933+ j = i;
67934+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
67935+ uid_set[j] = uid_set[j - 1];
67936+ j--;
67937+ }
67938+ uid_set[j] = index;
67939+ }
67940+
67941+ return;
67942+}
67943+
67944+static __inline__ void
67945+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
67946+{
67947+ int loc;
67948+ uid_t uid = GR_GLOBAL_UID(kuid);
67949+
67950+ if (uid_used == GR_UIDTABLE_MAX)
67951+ return;
67952+
67953+ loc = gr_find_uid(uid);
67954+
67955+ if (loc >= 0) {
67956+ uid_set[loc].expires = expires;
67957+ return;
67958+ }
67959+
67960+ uid_set[uid_used].uid = uid;
67961+ uid_set[uid_used].expires = expires;
67962+ uid_used++;
67963+
67964+ gr_insertsort();
67965+
67966+ return;
67967+}
67968+
67969+void
67970+gr_remove_uid(const unsigned short loc)
67971+{
67972+ unsigned short i;
67973+
67974+ for (i = loc + 1; i < uid_used; i++)
67975+ uid_set[i - 1] = uid_set[i];
67976+
67977+ uid_used--;
67978+
67979+ return;
67980+}
67981+
67982+int
67983+gr_check_crash_uid(const kuid_t kuid)
67984+{
67985+ int loc;
67986+ int ret = 0;
67987+ uid_t uid;
67988+
67989+ if (unlikely(!gr_acl_is_enabled()))
67990+ return 0;
67991+
67992+ uid = GR_GLOBAL_UID(kuid);
67993+
67994+ spin_lock(&gr_uid_lock);
67995+ loc = gr_find_uid(uid);
67996+
67997+ if (loc < 0)
67998+ goto out_unlock;
67999+
68000+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
68001+ gr_remove_uid(loc);
68002+ else
68003+ ret = 1;
68004+
68005+out_unlock:
68006+ spin_unlock(&gr_uid_lock);
68007+ return ret;
68008+}
68009+
68010+static __inline__ int
68011+proc_is_setxid(const struct cred *cred)
68012+{
68013+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
68014+ !uid_eq(cred->uid, cred->fsuid))
68015+ return 1;
68016+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
68017+ !gid_eq(cred->gid, cred->fsgid))
68018+ return 1;
68019+
68020+ return 0;
68021+}
68022+
68023+extern int gr_fake_force_sig(int sig, struct task_struct *t);
68024+
68025+void
68026+gr_handle_crash(struct task_struct *task, const int sig)
68027+{
68028+ struct acl_subject_label *curr;
68029+ struct task_struct *tsk, *tsk2;
68030+ const struct cred *cred;
68031+ const struct cred *cred2;
68032+
68033+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
68034+ return;
68035+
68036+ if (unlikely(!gr_acl_is_enabled()))
68037+ return;
68038+
68039+ curr = task->acl;
68040+
68041+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
68042+ return;
68043+
68044+ if (time_before_eq(curr->expires, get_seconds())) {
68045+ curr->expires = 0;
68046+ curr->crashes = 0;
68047+ }
68048+
68049+ curr->crashes++;
68050+
68051+ if (!curr->expires)
68052+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
68053+
68054+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
68055+ time_after(curr->expires, get_seconds())) {
68056+ rcu_read_lock();
68057+ cred = __task_cred(task);
68058+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
68059+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
68060+ spin_lock(&gr_uid_lock);
68061+ gr_insert_uid(cred->uid, curr->expires);
68062+ spin_unlock(&gr_uid_lock);
68063+ curr->expires = 0;
68064+ curr->crashes = 0;
68065+ read_lock(&tasklist_lock);
68066+ do_each_thread(tsk2, tsk) {
68067+ cred2 = __task_cred(tsk);
68068+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
68069+ gr_fake_force_sig(SIGKILL, tsk);
68070+ } while_each_thread(tsk2, tsk);
68071+ read_unlock(&tasklist_lock);
68072+ } else {
68073+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
68074+ read_lock(&tasklist_lock);
68075+ read_lock(&grsec_exec_file_lock);
68076+ do_each_thread(tsk2, tsk) {
68077+ if (likely(tsk != task)) {
68078+ // if this thread has the same subject as the one that triggered
68079+ // RES_CRASH and it's the same binary, kill it
68080+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
68081+ gr_fake_force_sig(SIGKILL, tsk);
68082+ }
68083+ } while_each_thread(tsk2, tsk);
68084+ read_unlock(&grsec_exec_file_lock);
68085+ read_unlock(&tasklist_lock);
68086+ }
68087+ rcu_read_unlock();
68088+ }
68089+
68090+ return;
68091+}
68092+
68093+int
68094+gr_check_crash_exec(const struct file *filp)
68095+{
68096+ struct acl_subject_label *curr;
68097+
68098+ if (unlikely(!gr_acl_is_enabled()))
68099+ return 0;
68100+
68101+ read_lock(&gr_inode_lock);
68102+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
68103+ __get_dev(filp->f_path.dentry),
68104+ current->role);
68105+ read_unlock(&gr_inode_lock);
68106+
68107+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
68108+ (!curr->crashes && !curr->expires))
68109+ return 0;
68110+
68111+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
68112+ time_after(curr->expires, get_seconds()))
68113+ return 1;
68114+ else if (time_before_eq(curr->expires, get_seconds())) {
68115+ curr->crashes = 0;
68116+ curr->expires = 0;
68117+ }
68118+
68119+ return 0;
68120+}
68121+
68122+void
68123+gr_handle_alertkill(struct task_struct *task)
68124+{
68125+ struct acl_subject_label *curracl;
68126+ __u32 curr_ip;
68127+ struct task_struct *p, *p2;
68128+
68129+ if (unlikely(!gr_acl_is_enabled()))
68130+ return;
68131+
68132+ curracl = task->acl;
68133+ curr_ip = task->signal->curr_ip;
68134+
68135+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
68136+ read_lock(&tasklist_lock);
68137+ do_each_thread(p2, p) {
68138+ if (p->signal->curr_ip == curr_ip)
68139+ gr_fake_force_sig(SIGKILL, p);
68140+ } while_each_thread(p2, p);
68141+ read_unlock(&tasklist_lock);
68142+ } else if (curracl->mode & GR_KILLPROC)
68143+ gr_fake_force_sig(SIGKILL, task);
68144+
68145+ return;
68146+}
68147diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
68148new file mode 100644
68149index 0000000..98011b0
68150--- /dev/null
68151+++ b/grsecurity/gracl_shm.c
68152@@ -0,0 +1,40 @@
68153+#include <linux/kernel.h>
68154+#include <linux/mm.h>
68155+#include <linux/sched.h>
68156+#include <linux/file.h>
68157+#include <linux/ipc.h>
68158+#include <linux/gracl.h>
68159+#include <linux/grsecurity.h>
68160+#include <linux/grinternal.h>
68161+
68162+int
68163+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68164+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
68165+{
68166+ struct task_struct *task;
68167+
68168+ if (!gr_acl_is_enabled())
68169+ return 1;
68170+
68171+ rcu_read_lock();
68172+ read_lock(&tasklist_lock);
68173+
68174+ task = find_task_by_vpid(shm_cprid);
68175+
68176+ if (unlikely(!task))
68177+ task = find_task_by_vpid(shm_lapid);
68178+
68179+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
68180+ (task_pid_nr(task) == shm_lapid)) &&
68181+ (task->acl->mode & GR_PROTSHM) &&
68182+ (task->acl != current->acl))) {
68183+ read_unlock(&tasklist_lock);
68184+ rcu_read_unlock();
68185+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
68186+ return 0;
68187+ }
68188+ read_unlock(&tasklist_lock);
68189+ rcu_read_unlock();
68190+
68191+ return 1;
68192+}
68193diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
68194new file mode 100644
68195index 0000000..bc0be01
68196--- /dev/null
68197+++ b/grsecurity/grsec_chdir.c
68198@@ -0,0 +1,19 @@
68199+#include <linux/kernel.h>
68200+#include <linux/sched.h>
68201+#include <linux/fs.h>
68202+#include <linux/file.h>
68203+#include <linux/grsecurity.h>
68204+#include <linux/grinternal.h>
68205+
68206+void
68207+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
68208+{
68209+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
68210+ if ((grsec_enable_chdir && grsec_enable_group &&
68211+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
68212+ !grsec_enable_group)) {
68213+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
68214+ }
68215+#endif
68216+ return;
68217+}
68218diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
68219new file mode 100644
68220index 0000000..bd6e105
68221--- /dev/null
68222+++ b/grsecurity/grsec_chroot.c
68223@@ -0,0 +1,370 @@
68224+#include <linux/kernel.h>
68225+#include <linux/module.h>
68226+#include <linux/sched.h>
68227+#include <linux/file.h>
68228+#include <linux/fs.h>
68229+#include <linux/mount.h>
68230+#include <linux/types.h>
68231+#include "../fs/mount.h"
68232+#include <linux/grsecurity.h>
68233+#include <linux/grinternal.h>
68234+
68235+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68236+static int gr_init_ran;
68237+#endif
68238+
68239+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
68240+{
68241+#ifdef CONFIG_GRKERNSEC
68242+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
68243+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
68244+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68245+ && gr_init_ran
68246+#endif
68247+ )
68248+ task->gr_is_chrooted = 1;
68249+ else {
68250+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68251+ if (task_pid_nr(task) == 1 && !gr_init_ran)
68252+ gr_init_ran = 1;
68253+#endif
68254+ task->gr_is_chrooted = 0;
68255+ }
68256+
68257+ task->gr_chroot_dentry = path->dentry;
68258+#endif
68259+ return;
68260+}
68261+
68262+void gr_clear_chroot_entries(struct task_struct *task)
68263+{
68264+#ifdef CONFIG_GRKERNSEC
68265+ task->gr_is_chrooted = 0;
68266+ task->gr_chroot_dentry = NULL;
68267+#endif
68268+ return;
68269+}
68270+
68271+int
68272+gr_handle_chroot_unix(const pid_t pid)
68273+{
68274+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
68275+ struct task_struct *p;
68276+
68277+ if (unlikely(!grsec_enable_chroot_unix))
68278+ return 1;
68279+
68280+ if (likely(!proc_is_chrooted(current)))
68281+ return 1;
68282+
68283+ rcu_read_lock();
68284+ read_lock(&tasklist_lock);
68285+ p = find_task_by_vpid_unrestricted(pid);
68286+ if (unlikely(p && !have_same_root(current, p))) {
68287+ read_unlock(&tasklist_lock);
68288+ rcu_read_unlock();
68289+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
68290+ return 0;
68291+ }
68292+ read_unlock(&tasklist_lock);
68293+ rcu_read_unlock();
68294+#endif
68295+ return 1;
68296+}
68297+
68298+int
68299+gr_handle_chroot_nice(void)
68300+{
68301+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
68302+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
68303+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
68304+ return -EPERM;
68305+ }
68306+#endif
68307+ return 0;
68308+}
68309+
68310+int
68311+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
68312+{
68313+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
68314+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
68315+ && proc_is_chrooted(current)) {
68316+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
68317+ return -EACCES;
68318+ }
68319+#endif
68320+ return 0;
68321+}
68322+
68323+int
68324+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
68325+{
68326+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68327+ struct task_struct *p;
68328+ int ret = 0;
68329+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
68330+ return ret;
68331+
68332+ read_lock(&tasklist_lock);
68333+ do_each_pid_task(pid, type, p) {
68334+ if (!have_same_root(current, p)) {
68335+ ret = 1;
68336+ goto out;
68337+ }
68338+ } while_each_pid_task(pid, type, p);
68339+out:
68340+ read_unlock(&tasklist_lock);
68341+ return ret;
68342+#endif
68343+ return 0;
68344+}
68345+
68346+int
68347+gr_pid_is_chrooted(struct task_struct *p)
68348+{
68349+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68350+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
68351+ return 0;
68352+
68353+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
68354+ !have_same_root(current, p)) {
68355+ return 1;
68356+ }
68357+#endif
68358+ return 0;
68359+}
68360+
68361+EXPORT_SYMBOL(gr_pid_is_chrooted);
68362+
68363+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
68364+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
68365+{
68366+ struct path path, currentroot;
68367+ int ret = 0;
68368+
68369+ path.dentry = (struct dentry *)u_dentry;
68370+ path.mnt = (struct vfsmount *)u_mnt;
68371+ get_fs_root(current->fs, &currentroot);
68372+ if (path_is_under(&path, &currentroot))
68373+ ret = 1;
68374+ path_put(&currentroot);
68375+
68376+ return ret;
68377+}
68378+#endif
68379+
68380+int
68381+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
68382+{
68383+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
68384+ if (!grsec_enable_chroot_fchdir)
68385+ return 1;
68386+
68387+ if (!proc_is_chrooted(current))
68388+ return 1;
68389+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
68390+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
68391+ return 0;
68392+ }
68393+#endif
68394+ return 1;
68395+}
68396+
68397+int
68398+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68399+ const time_t shm_createtime)
68400+{
68401+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
68402+ struct task_struct *p;
68403+ time_t starttime;
68404+
68405+ if (unlikely(!grsec_enable_chroot_shmat))
68406+ return 1;
68407+
68408+ if (likely(!proc_is_chrooted(current)))
68409+ return 1;
68410+
68411+ rcu_read_lock();
68412+ read_lock(&tasklist_lock);
68413+
68414+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
68415+ starttime = p->start_time.tv_sec;
68416+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
68417+ if (have_same_root(current, p)) {
68418+ goto allow;
68419+ } else {
68420+ read_unlock(&tasklist_lock);
68421+ rcu_read_unlock();
68422+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
68423+ return 0;
68424+ }
68425+ }
68426+ /* creator exited, pid reuse, fall through to next check */
68427+ }
68428+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
68429+ if (unlikely(!have_same_root(current, p))) {
68430+ read_unlock(&tasklist_lock);
68431+ rcu_read_unlock();
68432+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
68433+ return 0;
68434+ }
68435+ }
68436+
68437+allow:
68438+ read_unlock(&tasklist_lock);
68439+ rcu_read_unlock();
68440+#endif
68441+ return 1;
68442+}
68443+
68444+void
68445+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
68446+{
68447+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
68448+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
68449+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
68450+#endif
68451+ return;
68452+}
68453+
68454+int
68455+gr_handle_chroot_mknod(const struct dentry *dentry,
68456+ const struct vfsmount *mnt, const int mode)
68457+{
68458+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
68459+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
68460+ proc_is_chrooted(current)) {
68461+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
68462+ return -EPERM;
68463+ }
68464+#endif
68465+ return 0;
68466+}
68467+
68468+int
68469+gr_handle_chroot_mount(const struct dentry *dentry,
68470+ const struct vfsmount *mnt, const char *dev_name)
68471+{
68472+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
68473+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
68474+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
68475+ return -EPERM;
68476+ }
68477+#endif
68478+ return 0;
68479+}
68480+
68481+int
68482+gr_handle_chroot_pivot(void)
68483+{
68484+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
68485+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
68486+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
68487+ return -EPERM;
68488+ }
68489+#endif
68490+ return 0;
68491+}
68492+
68493+int
68494+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
68495+{
68496+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
68497+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
68498+ !gr_is_outside_chroot(dentry, mnt)) {
68499+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
68500+ return -EPERM;
68501+ }
68502+#endif
68503+ return 0;
68504+}
68505+
68506+extern const char *captab_log[];
68507+extern int captab_log_entries;
68508+
68509+int
68510+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
68511+{
68512+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68513+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
68514+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
68515+ if (cap_raised(chroot_caps, cap)) {
68516+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
68517+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
68518+ }
68519+ return 0;
68520+ }
68521+ }
68522+#endif
68523+ return 1;
68524+}
68525+
68526+int
68527+gr_chroot_is_capable(const int cap)
68528+{
68529+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68530+ return gr_task_chroot_is_capable(current, current_cred(), cap);
68531+#endif
68532+ return 1;
68533+}
68534+
68535+int
68536+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
68537+{
68538+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68539+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
68540+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
68541+ if (cap_raised(chroot_caps, cap)) {
68542+ return 0;
68543+ }
68544+ }
68545+#endif
68546+ return 1;
68547+}
68548+
68549+int
68550+gr_chroot_is_capable_nolog(const int cap)
68551+{
68552+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68553+ return gr_task_chroot_is_capable_nolog(current, cap);
68554+#endif
68555+ return 1;
68556+}
68557+
68558+int
68559+gr_handle_chroot_sysctl(const int op)
68560+{
68561+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
68562+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
68563+ proc_is_chrooted(current))
68564+ return -EACCES;
68565+#endif
68566+ return 0;
68567+}
68568+
68569+void
68570+gr_handle_chroot_chdir(const struct path *path)
68571+{
68572+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
68573+ if (grsec_enable_chroot_chdir)
68574+ set_fs_pwd(current->fs, path);
68575+#endif
68576+ return;
68577+}
68578+
68579+int
68580+gr_handle_chroot_chmod(const struct dentry *dentry,
68581+ const struct vfsmount *mnt, const int mode)
68582+{
68583+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
68584+ /* allow chmod +s on directories, but not files */
68585+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
68586+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
68587+ proc_is_chrooted(current)) {
68588+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
68589+ return -EPERM;
68590+ }
68591+#endif
68592+ return 0;
68593+}
68594diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
68595new file mode 100644
68596index 0000000..ce65ceb
68597--- /dev/null
68598+++ b/grsecurity/grsec_disabled.c
68599@@ -0,0 +1,434 @@
68600+#include <linux/kernel.h>
68601+#include <linux/module.h>
68602+#include <linux/sched.h>
68603+#include <linux/file.h>
68604+#include <linux/fs.h>
68605+#include <linux/kdev_t.h>
68606+#include <linux/net.h>
68607+#include <linux/in.h>
68608+#include <linux/ip.h>
68609+#include <linux/skbuff.h>
68610+#include <linux/sysctl.h>
68611+
68612+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68613+void
68614+pax_set_initial_flags(struct linux_binprm *bprm)
68615+{
68616+ return;
68617+}
68618+#endif
68619+
68620+#ifdef CONFIG_SYSCTL
68621+__u32
68622+gr_handle_sysctl(const struct ctl_table * table, const int op)
68623+{
68624+ return 0;
68625+}
68626+#endif
68627+
68628+#ifdef CONFIG_TASKSTATS
68629+int gr_is_taskstats_denied(int pid)
68630+{
68631+ return 0;
68632+}
68633+#endif
68634+
68635+int
68636+gr_acl_is_enabled(void)
68637+{
68638+ return 0;
68639+}
68640+
68641+void
68642+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
68643+{
68644+ return;
68645+}
68646+
68647+int
68648+gr_handle_rawio(const struct inode *inode)
68649+{
68650+ return 0;
68651+}
68652+
68653+void
68654+gr_acl_handle_psacct(struct task_struct *task, const long code)
68655+{
68656+ return;
68657+}
68658+
68659+int
68660+gr_handle_ptrace(struct task_struct *task, const long request)
68661+{
68662+ return 0;
68663+}
68664+
68665+int
68666+gr_handle_proc_ptrace(struct task_struct *task)
68667+{
68668+ return 0;
68669+}
68670+
68671+int
68672+gr_set_acls(const int type)
68673+{
68674+ return 0;
68675+}
68676+
68677+int
68678+gr_check_hidden_task(const struct task_struct *tsk)
68679+{
68680+ return 0;
68681+}
68682+
68683+int
68684+gr_check_protected_task(const struct task_struct *task)
68685+{
68686+ return 0;
68687+}
68688+
68689+int
68690+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
68691+{
68692+ return 0;
68693+}
68694+
68695+void
68696+gr_copy_label(struct task_struct *tsk)
68697+{
68698+ return;
68699+}
68700+
68701+void
68702+gr_set_pax_flags(struct task_struct *task)
68703+{
68704+ return;
68705+}
68706+
68707+int
68708+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
68709+ const int unsafe_share)
68710+{
68711+ return 0;
68712+}
68713+
68714+void
68715+gr_handle_delete(const ino_t ino, const dev_t dev)
68716+{
68717+ return;
68718+}
68719+
68720+void
68721+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
68722+{
68723+ return;
68724+}
68725+
68726+void
68727+gr_handle_crash(struct task_struct *task, const int sig)
68728+{
68729+ return;
68730+}
68731+
68732+int
68733+gr_check_crash_exec(const struct file *filp)
68734+{
68735+ return 0;
68736+}
68737+
68738+int
68739+gr_check_crash_uid(const kuid_t uid)
68740+{
68741+ return 0;
68742+}
68743+
68744+void
68745+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68746+ struct dentry *old_dentry,
68747+ struct dentry *new_dentry,
68748+ struct vfsmount *mnt, const __u8 replace)
68749+{
68750+ return;
68751+}
68752+
68753+int
68754+gr_search_socket(const int family, const int type, const int protocol)
68755+{
68756+ return 1;
68757+}
68758+
68759+int
68760+gr_search_connectbind(const int mode, const struct socket *sock,
68761+ const struct sockaddr_in *addr)
68762+{
68763+ return 0;
68764+}
68765+
68766+void
68767+gr_handle_alertkill(struct task_struct *task)
68768+{
68769+ return;
68770+}
68771+
68772+__u32
68773+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
68774+{
68775+ return 1;
68776+}
68777+
68778+__u32
68779+gr_acl_handle_hidden_file(const struct dentry * dentry,
68780+ const struct vfsmount * mnt)
68781+{
68782+ return 1;
68783+}
68784+
68785+__u32
68786+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
68787+ int acc_mode)
68788+{
68789+ return 1;
68790+}
68791+
68792+__u32
68793+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
68794+{
68795+ return 1;
68796+}
68797+
68798+__u32
68799+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
68800+{
68801+ return 1;
68802+}
68803+
68804+int
68805+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
68806+ unsigned int *vm_flags)
68807+{
68808+ return 1;
68809+}
68810+
68811+__u32
68812+gr_acl_handle_truncate(const struct dentry * dentry,
68813+ const struct vfsmount * mnt)
68814+{
68815+ return 1;
68816+}
68817+
68818+__u32
68819+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
68820+{
68821+ return 1;
68822+}
68823+
68824+__u32
68825+gr_acl_handle_access(const struct dentry * dentry,
68826+ const struct vfsmount * mnt, const int fmode)
68827+{
68828+ return 1;
68829+}
68830+
68831+__u32
68832+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
68833+ umode_t *mode)
68834+{
68835+ return 1;
68836+}
68837+
68838+__u32
68839+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
68840+{
68841+ return 1;
68842+}
68843+
68844+__u32
68845+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
68846+{
68847+ return 1;
68848+}
68849+
68850+void
68851+grsecurity_init(void)
68852+{
68853+ return;
68854+}
68855+
68856+umode_t gr_acl_umask(void)
68857+{
68858+ return 0;
68859+}
68860+
68861+__u32
68862+gr_acl_handle_mknod(const struct dentry * new_dentry,
68863+ const struct dentry * parent_dentry,
68864+ const struct vfsmount * parent_mnt,
68865+ const int mode)
68866+{
68867+ return 1;
68868+}
68869+
68870+__u32
68871+gr_acl_handle_mkdir(const struct dentry * new_dentry,
68872+ const struct dentry * parent_dentry,
68873+ const struct vfsmount * parent_mnt)
68874+{
68875+ return 1;
68876+}
68877+
68878+__u32
68879+gr_acl_handle_symlink(const struct dentry * new_dentry,
68880+ const struct dentry * parent_dentry,
68881+ const struct vfsmount * parent_mnt, const struct filename *from)
68882+{
68883+ return 1;
68884+}
68885+
68886+__u32
68887+gr_acl_handle_link(const struct dentry * new_dentry,
68888+ const struct dentry * parent_dentry,
68889+ const struct vfsmount * parent_mnt,
68890+ const struct dentry * old_dentry,
68891+ const struct vfsmount * old_mnt, const struct filename *to)
68892+{
68893+ return 1;
68894+}
68895+
68896+int
68897+gr_acl_handle_rename(const struct dentry *new_dentry,
68898+ const struct dentry *parent_dentry,
68899+ const struct vfsmount *parent_mnt,
68900+ const struct dentry *old_dentry,
68901+ const struct inode *old_parent_inode,
68902+ const struct vfsmount *old_mnt, const struct filename *newname)
68903+{
68904+ return 0;
68905+}
68906+
68907+int
68908+gr_acl_handle_filldir(const struct file *file, const char *name,
68909+ const int namelen, const ino_t ino)
68910+{
68911+ return 1;
68912+}
68913+
68914+int
68915+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68916+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
68917+{
68918+ return 1;
68919+}
68920+
68921+int
68922+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
68923+{
68924+ return 0;
68925+}
68926+
68927+int
68928+gr_search_accept(const struct socket *sock)
68929+{
68930+ return 0;
68931+}
68932+
68933+int
68934+gr_search_listen(const struct socket *sock)
68935+{
68936+ return 0;
68937+}
68938+
68939+int
68940+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
68941+{
68942+ return 0;
68943+}
68944+
68945+__u32
68946+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
68947+{
68948+ return 1;
68949+}
68950+
68951+__u32
68952+gr_acl_handle_creat(const struct dentry * dentry,
68953+ const struct dentry * p_dentry,
68954+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
68955+ const int imode)
68956+{
68957+ return 1;
68958+}
68959+
68960+void
68961+gr_acl_handle_exit(void)
68962+{
68963+ return;
68964+}
68965+
68966+int
68967+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
68968+{
68969+ return 1;
68970+}
68971+
68972+void
68973+gr_set_role_label(const kuid_t uid, const kgid_t gid)
68974+{
68975+ return;
68976+}
68977+
68978+int
68979+gr_acl_handle_procpidmem(const struct task_struct *task)
68980+{
68981+ return 0;
68982+}
68983+
68984+int
68985+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
68986+{
68987+ return 0;
68988+}
68989+
68990+int
68991+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
68992+{
68993+ return 0;
68994+}
68995+
68996+void
68997+gr_set_kernel_label(struct task_struct *task)
68998+{
68999+ return;
69000+}
69001+
69002+int
69003+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
69004+{
69005+ return 0;
69006+}
69007+
69008+int
69009+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
69010+{
69011+ return 0;
69012+}
69013+
69014+int gr_acl_enable_at_secure(void)
69015+{
69016+ return 0;
69017+}
69018+
69019+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69020+{
69021+ return dentry->d_sb->s_dev;
69022+}
69023+
69024+void gr_put_exec_file(struct task_struct *task)
69025+{
69026+ return;
69027+}
69028+
69029+EXPORT_SYMBOL(gr_set_kernel_label);
69030+#ifdef CONFIG_SECURITY
69031+EXPORT_SYMBOL(gr_check_user_change);
69032+EXPORT_SYMBOL(gr_check_group_change);
69033+#endif
69034diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
69035new file mode 100644
69036index 0000000..387032b
69037--- /dev/null
69038+++ b/grsecurity/grsec_exec.c
69039@@ -0,0 +1,187 @@
69040+#include <linux/kernel.h>
69041+#include <linux/sched.h>
69042+#include <linux/file.h>
69043+#include <linux/binfmts.h>
69044+#include <linux/fs.h>
69045+#include <linux/types.h>
69046+#include <linux/grdefs.h>
69047+#include <linux/grsecurity.h>
69048+#include <linux/grinternal.h>
69049+#include <linux/capability.h>
69050+#include <linux/module.h>
69051+#include <linux/compat.h>
69052+
69053+#include <asm/uaccess.h>
69054+
69055+#ifdef CONFIG_GRKERNSEC_EXECLOG
69056+static char gr_exec_arg_buf[132];
69057+static DEFINE_MUTEX(gr_exec_arg_mutex);
69058+#endif
69059+
69060+struct user_arg_ptr {
69061+#ifdef CONFIG_COMPAT
69062+ bool is_compat;
69063+#endif
69064+ union {
69065+ const char __user *const __user *native;
69066+#ifdef CONFIG_COMPAT
69067+ const compat_uptr_t __user *compat;
69068+#endif
69069+ } ptr;
69070+};
69071+
69072+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
69073+
69074+void
69075+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
69076+{
69077+#ifdef CONFIG_GRKERNSEC_EXECLOG
69078+ char *grarg = gr_exec_arg_buf;
69079+ unsigned int i, x, execlen = 0;
69080+ char c;
69081+
69082+ if (!((grsec_enable_execlog && grsec_enable_group &&
69083+ in_group_p(grsec_audit_gid))
69084+ || (grsec_enable_execlog && !grsec_enable_group)))
69085+ return;
69086+
69087+ mutex_lock(&gr_exec_arg_mutex);
69088+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
69089+
69090+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
69091+ const char __user *p;
69092+ unsigned int len;
69093+
69094+ p = get_user_arg_ptr(argv, i);
69095+ if (IS_ERR(p))
69096+ goto log;
69097+
69098+ len = strnlen_user(p, 128 - execlen);
69099+ if (len > 128 - execlen)
69100+ len = 128 - execlen;
69101+ else if (len > 0)
69102+ len--;
69103+ if (copy_from_user(grarg + execlen, p, len))
69104+ goto log;
69105+
69106+ /* rewrite unprintable characters */
69107+ for (x = 0; x < len; x++) {
69108+ c = *(grarg + execlen + x);
69109+ if (c < 32 || c > 126)
69110+ *(grarg + execlen + x) = ' ';
69111+ }
69112+
69113+ execlen += len;
69114+ *(grarg + execlen) = ' ';
69115+ *(grarg + execlen + 1) = '\0';
69116+ execlen++;
69117+ }
69118+
69119+ log:
69120+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
69121+ bprm->file->f_path.mnt, grarg);
69122+ mutex_unlock(&gr_exec_arg_mutex);
69123+#endif
69124+ return;
69125+}
69126+
69127+#ifdef CONFIG_GRKERNSEC
69128+extern int gr_acl_is_capable(const int cap);
69129+extern int gr_acl_is_capable_nolog(const int cap);
69130+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69131+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
69132+extern int gr_chroot_is_capable(const int cap);
69133+extern int gr_chroot_is_capable_nolog(const int cap);
69134+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69135+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
69136+#endif
69137+
69138+const char *captab_log[] = {
69139+ "CAP_CHOWN",
69140+ "CAP_DAC_OVERRIDE",
69141+ "CAP_DAC_READ_SEARCH",
69142+ "CAP_FOWNER",
69143+ "CAP_FSETID",
69144+ "CAP_KILL",
69145+ "CAP_SETGID",
69146+ "CAP_SETUID",
69147+ "CAP_SETPCAP",
69148+ "CAP_LINUX_IMMUTABLE",
69149+ "CAP_NET_BIND_SERVICE",
69150+ "CAP_NET_BROADCAST",
69151+ "CAP_NET_ADMIN",
69152+ "CAP_NET_RAW",
69153+ "CAP_IPC_LOCK",
69154+ "CAP_IPC_OWNER",
69155+ "CAP_SYS_MODULE",
69156+ "CAP_SYS_RAWIO",
69157+ "CAP_SYS_CHROOT",
69158+ "CAP_SYS_PTRACE",
69159+ "CAP_SYS_PACCT",
69160+ "CAP_SYS_ADMIN",
69161+ "CAP_SYS_BOOT",
69162+ "CAP_SYS_NICE",
69163+ "CAP_SYS_RESOURCE",
69164+ "CAP_SYS_TIME",
69165+ "CAP_SYS_TTY_CONFIG",
69166+ "CAP_MKNOD",
69167+ "CAP_LEASE",
69168+ "CAP_AUDIT_WRITE",
69169+ "CAP_AUDIT_CONTROL",
69170+ "CAP_SETFCAP",
69171+ "CAP_MAC_OVERRIDE",
69172+ "CAP_MAC_ADMIN",
69173+ "CAP_SYSLOG",
69174+ "CAP_WAKE_ALARM"
69175+};
69176+
69177+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
69178+
69179+int gr_is_capable(const int cap)
69180+{
69181+#ifdef CONFIG_GRKERNSEC
69182+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
69183+ return 1;
69184+ return 0;
69185+#else
69186+ return 1;
69187+#endif
69188+}
69189+
69190+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
69191+{
69192+#ifdef CONFIG_GRKERNSEC
69193+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
69194+ return 1;
69195+ return 0;
69196+#else
69197+ return 1;
69198+#endif
69199+}
69200+
69201+int gr_is_capable_nolog(const int cap)
69202+{
69203+#ifdef CONFIG_GRKERNSEC
69204+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
69205+ return 1;
69206+ return 0;
69207+#else
69208+ return 1;
69209+#endif
69210+}
69211+
69212+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
69213+{
69214+#ifdef CONFIG_GRKERNSEC
69215+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
69216+ return 1;
69217+ return 0;
69218+#else
69219+ return 1;
69220+#endif
69221+}
69222+
69223+EXPORT_SYMBOL(gr_is_capable);
69224+EXPORT_SYMBOL(gr_is_capable_nolog);
69225+EXPORT_SYMBOL(gr_task_is_capable);
69226+EXPORT_SYMBOL(gr_task_is_capable_nolog);
69227diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
69228new file mode 100644
69229index 0000000..06cc6ea
69230--- /dev/null
69231+++ b/grsecurity/grsec_fifo.c
69232@@ -0,0 +1,24 @@
69233+#include <linux/kernel.h>
69234+#include <linux/sched.h>
69235+#include <linux/fs.h>
69236+#include <linux/file.h>
69237+#include <linux/grinternal.h>
69238+
69239+int
69240+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
69241+ const struct dentry *dir, const int flag, const int acc_mode)
69242+{
69243+#ifdef CONFIG_GRKERNSEC_FIFO
69244+ const struct cred *cred = current_cred();
69245+
69246+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
69247+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
69248+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
69249+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
69250+ if (!inode_permission(dentry->d_inode, acc_mode))
69251+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
69252+ return -EACCES;
69253+ }
69254+#endif
69255+ return 0;
69256+}
69257diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
69258new file mode 100644
69259index 0000000..8ca18bf
69260--- /dev/null
69261+++ b/grsecurity/grsec_fork.c
69262@@ -0,0 +1,23 @@
69263+#include <linux/kernel.h>
69264+#include <linux/sched.h>
69265+#include <linux/grsecurity.h>
69266+#include <linux/grinternal.h>
69267+#include <linux/errno.h>
69268+
69269+void
69270+gr_log_forkfail(const int retval)
69271+{
69272+#ifdef CONFIG_GRKERNSEC_FORKFAIL
69273+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
69274+ switch (retval) {
69275+ case -EAGAIN:
69276+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
69277+ break;
69278+ case -ENOMEM:
69279+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
69280+ break;
69281+ }
69282+ }
69283+#endif
69284+ return;
69285+}
69286diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
69287new file mode 100644
e2b79cd1 69288index 0000000..836f38f
bb5f0bf8
AF
69289--- /dev/null
69290+++ b/grsecurity/grsec_init.c
e2b79cd1 69291@@ -0,0 +1,280 @@
bb5f0bf8
AF
69292+#include <linux/kernel.h>
69293+#include <linux/sched.h>
69294+#include <linux/mm.h>
69295+#include <linux/gracl.h>
69296+#include <linux/slab.h>
69297+#include <linux/vmalloc.h>
69298+#include <linux/percpu.h>
69299+#include <linux/module.h>
69300+
69301+int grsec_enable_ptrace_readexec;
69302+int grsec_enable_setxid;
69303+int grsec_enable_symlinkown;
69304+kgid_t grsec_symlinkown_gid;
69305+int grsec_enable_brute;
69306+int grsec_enable_link;
69307+int grsec_enable_dmesg;
69308+int grsec_enable_harden_ptrace;
69309+int grsec_enable_fifo;
69310+int grsec_enable_execlog;
69311+int grsec_enable_signal;
69312+int grsec_enable_forkfail;
69313+int grsec_enable_audit_ptrace;
69314+int grsec_enable_time;
69315+int grsec_enable_group;
69316+kgid_t grsec_audit_gid;
69317+int grsec_enable_chdir;
69318+int grsec_enable_mount;
69319+int grsec_enable_rofs;
e2b79cd1 69320+int grsec_deny_new_usb;
bb5f0bf8
AF
69321+int grsec_enable_chroot_findtask;
69322+int grsec_enable_chroot_mount;
69323+int grsec_enable_chroot_shmat;
69324+int grsec_enable_chroot_fchdir;
69325+int grsec_enable_chroot_double;
69326+int grsec_enable_chroot_pivot;
69327+int grsec_enable_chroot_chdir;
69328+int grsec_enable_chroot_chmod;
69329+int grsec_enable_chroot_mknod;
69330+int grsec_enable_chroot_nice;
69331+int grsec_enable_chroot_execlog;
69332+int grsec_enable_chroot_caps;
69333+int grsec_enable_chroot_sysctl;
69334+int grsec_enable_chroot_unix;
69335+int grsec_enable_tpe;
69336+kgid_t grsec_tpe_gid;
69337+int grsec_enable_blackhole;
69338+#ifdef CONFIG_IPV6_MODULE
69339+EXPORT_SYMBOL(grsec_enable_blackhole);
69340+#endif
69341+int grsec_lastack_retries;
69342+int grsec_enable_tpe_all;
69343+int grsec_enable_tpe_invert;
69344+int grsec_enable_socket_all;
69345+kgid_t grsec_socket_all_gid;
69346+int grsec_enable_socket_client;
69347+kgid_t grsec_socket_client_gid;
69348+int grsec_enable_socket_server;
69349+kgid_t grsec_socket_server_gid;
69350+int grsec_resource_logging;
69351+int grsec_disable_privio;
69352+int grsec_enable_log_rwxmaps;
69353+int grsec_lock;
69354+
69355+DEFINE_SPINLOCK(grsec_alert_lock);
69356+unsigned long grsec_alert_wtime = 0;
69357+unsigned long grsec_alert_fyet = 0;
69358+
69359+DEFINE_SPINLOCK(grsec_audit_lock);
69360+
69361+DEFINE_RWLOCK(grsec_exec_file_lock);
69362+
69363+char *gr_shared_page[4];
69364+
69365+char *gr_alert_log_fmt;
69366+char *gr_audit_log_fmt;
69367+char *gr_alert_log_buf;
69368+char *gr_audit_log_buf;
69369+
69370+extern struct gr_arg *gr_usermode;
69371+extern unsigned char *gr_system_salt;
69372+extern unsigned char *gr_system_sum;
69373+
69374+void __init
69375+grsecurity_init(void)
69376+{
69377+ int j;
69378+ /* create the per-cpu shared pages */
69379+
69380+#ifdef CONFIG_X86
69381+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
69382+#endif
69383+
69384+ for (j = 0; j < 4; j++) {
69385+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
69386+ if (gr_shared_page[j] == NULL) {
69387+ panic("Unable to allocate grsecurity shared page");
69388+ return;
69389+ }
69390+ }
69391+
69392+ /* allocate log buffers */
69393+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
69394+ if (!gr_alert_log_fmt) {
69395+ panic("Unable to allocate grsecurity alert log format buffer");
69396+ return;
69397+ }
69398+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
69399+ if (!gr_audit_log_fmt) {
69400+ panic("Unable to allocate grsecurity audit log format buffer");
69401+ return;
69402+ }
69403+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
69404+ if (!gr_alert_log_buf) {
69405+ panic("Unable to allocate grsecurity alert log buffer");
69406+ return;
69407+ }
69408+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
69409+ if (!gr_audit_log_buf) {
69410+ panic("Unable to allocate grsecurity audit log buffer");
69411+ return;
69412+ }
69413+
69414+ /* allocate memory for authentication structure */
69415+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
69416+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
69417+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
69418+
69419+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
69420+ panic("Unable to allocate grsecurity authentication structure");
69421+ return;
69422+ }
69423+
69424+
69425+#ifdef CONFIG_GRKERNSEC_IO
69426+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
69427+ grsec_disable_privio = 1;
69428+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
69429+ grsec_disable_privio = 1;
69430+#else
69431+ grsec_disable_privio = 0;
69432+#endif
69433+#endif
69434+
69435+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
69436+ /* for backward compatibility, tpe_invert always defaults to on if
69437+ enabled in the kernel
69438+ */
69439+ grsec_enable_tpe_invert = 1;
69440+#endif
69441+
69442+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
69443+#ifndef CONFIG_GRKERNSEC_SYSCTL
69444+ grsec_lock = 1;
69445+#endif
69446+
69447+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
69448+ grsec_enable_log_rwxmaps = 1;
69449+#endif
69450+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
69451+ grsec_enable_group = 1;
69452+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
69453+#endif
69454+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
69455+ grsec_enable_ptrace_readexec = 1;
69456+#endif
69457+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
69458+ grsec_enable_chdir = 1;
69459+#endif
69460+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
69461+ grsec_enable_harden_ptrace = 1;
69462+#endif
69463+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
69464+ grsec_enable_mount = 1;
69465+#endif
69466+#ifdef CONFIG_GRKERNSEC_LINK
69467+ grsec_enable_link = 1;
69468+#endif
69469+#ifdef CONFIG_GRKERNSEC_BRUTE
69470+ grsec_enable_brute = 1;
69471+#endif
69472+#ifdef CONFIG_GRKERNSEC_DMESG
69473+ grsec_enable_dmesg = 1;
69474+#endif
69475+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69476+ grsec_enable_blackhole = 1;
69477+ grsec_lastack_retries = 4;
69478+#endif
69479+#ifdef CONFIG_GRKERNSEC_FIFO
69480+ grsec_enable_fifo = 1;
69481+#endif
69482+#ifdef CONFIG_GRKERNSEC_EXECLOG
69483+ grsec_enable_execlog = 1;
69484+#endif
69485+#ifdef CONFIG_GRKERNSEC_SETXID
69486+ grsec_enable_setxid = 1;
69487+#endif
69488+#ifdef CONFIG_GRKERNSEC_SIGNAL
69489+ grsec_enable_signal = 1;
69490+#endif
69491+#ifdef CONFIG_GRKERNSEC_FORKFAIL
69492+ grsec_enable_forkfail = 1;
69493+#endif
69494+#ifdef CONFIG_GRKERNSEC_TIME
69495+ grsec_enable_time = 1;
69496+#endif
69497+#ifdef CONFIG_GRKERNSEC_RESLOG
69498+ grsec_resource_logging = 1;
69499+#endif
69500+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69501+ grsec_enable_chroot_findtask = 1;
69502+#endif
69503+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
69504+ grsec_enable_chroot_unix = 1;
69505+#endif
69506+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
69507+ grsec_enable_chroot_mount = 1;
69508+#endif
69509+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
69510+ grsec_enable_chroot_fchdir = 1;
69511+#endif
69512+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
69513+ grsec_enable_chroot_shmat = 1;
69514+#endif
69515+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
69516+ grsec_enable_audit_ptrace = 1;
69517+#endif
69518+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
69519+ grsec_enable_chroot_double = 1;
69520+#endif
69521+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
69522+ grsec_enable_chroot_pivot = 1;
69523+#endif
69524+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
69525+ grsec_enable_chroot_chdir = 1;
69526+#endif
69527+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
69528+ grsec_enable_chroot_chmod = 1;
69529+#endif
69530+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
69531+ grsec_enable_chroot_mknod = 1;
69532+#endif
69533+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
69534+ grsec_enable_chroot_nice = 1;
69535+#endif
69536+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
69537+ grsec_enable_chroot_execlog = 1;
69538+#endif
69539+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69540+ grsec_enable_chroot_caps = 1;
69541+#endif
69542+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
69543+ grsec_enable_chroot_sysctl = 1;
69544+#endif
69545+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
69546+ grsec_enable_symlinkown = 1;
69547+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
69548+#endif
69549+#ifdef CONFIG_GRKERNSEC_TPE
69550+ grsec_enable_tpe = 1;
69551+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
69552+#ifdef CONFIG_GRKERNSEC_TPE_ALL
69553+ grsec_enable_tpe_all = 1;
69554+#endif
69555+#endif
69556+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
69557+ grsec_enable_socket_all = 1;
69558+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
69559+#endif
69560+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
69561+ grsec_enable_socket_client = 1;
69562+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
69563+#endif
69564+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69565+ grsec_enable_socket_server = 1;
69566+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
69567+#endif
69568+#endif
69569+
69570+ return;
69571+}
69572diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
69573new file mode 100644
69574index 0000000..5e05e20
69575--- /dev/null
69576+++ b/grsecurity/grsec_link.c
69577@@ -0,0 +1,58 @@
69578+#include <linux/kernel.h>
69579+#include <linux/sched.h>
69580+#include <linux/fs.h>
69581+#include <linux/file.h>
69582+#include <linux/grinternal.h>
69583+
69584+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
69585+{
69586+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
69587+ const struct inode *link_inode = link->dentry->d_inode;
69588+
69589+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
69590+ /* ignore root-owned links, e.g. /proc/self */
69591+ gr_is_global_nonroot(link_inode->i_uid) && target &&
69592+ !uid_eq(link_inode->i_uid, target->i_uid)) {
69593+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
69594+ return 1;
69595+ }
69596+#endif
69597+ return 0;
69598+}
69599+
69600+int
69601+gr_handle_follow_link(const struct inode *parent,
69602+ const struct inode *inode,
69603+ const struct dentry *dentry, const struct vfsmount *mnt)
69604+{
69605+#ifdef CONFIG_GRKERNSEC_LINK
69606+ const struct cred *cred = current_cred();
69607+
69608+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
69609+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
69610+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
69611+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
69612+ return -EACCES;
69613+ }
69614+#endif
69615+ return 0;
69616+}
69617+
69618+int
69619+gr_handle_hardlink(const struct dentry *dentry,
69620+ const struct vfsmount *mnt,
69621+ struct inode *inode, const int mode, const struct filename *to)
69622+{
69623+#ifdef CONFIG_GRKERNSEC_LINK
69624+ const struct cred *cred = current_cred();
69625+
69626+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
69627+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
69628+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
69629+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
69630+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
69631+ return -EPERM;
69632+ }
69633+#endif
69634+ return 0;
69635+}
69636diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
69637new file mode 100644
69638index 0000000..dbe0a6b
69639--- /dev/null
69640+++ b/grsecurity/grsec_log.c
69641@@ -0,0 +1,341 @@
69642+#include <linux/kernel.h>
69643+#include <linux/sched.h>
69644+#include <linux/file.h>
69645+#include <linux/tty.h>
69646+#include <linux/fs.h>
69647+#include <linux/mm.h>
69648+#include <linux/grinternal.h>
69649+
69650+#ifdef CONFIG_TREE_PREEMPT_RCU
69651+#define DISABLE_PREEMPT() preempt_disable()
69652+#define ENABLE_PREEMPT() preempt_enable()
69653+#else
69654+#define DISABLE_PREEMPT()
69655+#define ENABLE_PREEMPT()
69656+#endif
69657+
69658+#define BEGIN_LOCKS(x) \
69659+ DISABLE_PREEMPT(); \
69660+ rcu_read_lock(); \
69661+ read_lock(&tasklist_lock); \
69662+ read_lock(&grsec_exec_file_lock); \
69663+ if (x != GR_DO_AUDIT) \
69664+ spin_lock(&grsec_alert_lock); \
69665+ else \
69666+ spin_lock(&grsec_audit_lock)
69667+
69668+#define END_LOCKS(x) \
69669+ if (x != GR_DO_AUDIT) \
69670+ spin_unlock(&grsec_alert_lock); \
69671+ else \
69672+ spin_unlock(&grsec_audit_lock); \
69673+ read_unlock(&grsec_exec_file_lock); \
69674+ read_unlock(&tasklist_lock); \
69675+ rcu_read_unlock(); \
69676+ ENABLE_PREEMPT(); \
69677+ if (x == GR_DONT_AUDIT) \
69678+ gr_handle_alertkill(current)
69679+
69680+enum {
69681+ FLOODING,
69682+ NO_FLOODING
69683+};
69684+
69685+extern char *gr_alert_log_fmt;
69686+extern char *gr_audit_log_fmt;
69687+extern char *gr_alert_log_buf;
69688+extern char *gr_audit_log_buf;
69689+
69690+static int gr_log_start(int audit)
69691+{
69692+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
69693+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
69694+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69695+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
69696+ unsigned long curr_secs = get_seconds();
69697+
69698+ if (audit == GR_DO_AUDIT)
69699+ goto set_fmt;
69700+
69701+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
69702+ grsec_alert_wtime = curr_secs;
69703+ grsec_alert_fyet = 0;
69704+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
69705+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
69706+ grsec_alert_fyet++;
69707+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
69708+ grsec_alert_wtime = curr_secs;
69709+ grsec_alert_fyet++;
69710+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
69711+ return FLOODING;
69712+ }
69713+ else return FLOODING;
69714+
69715+set_fmt:
69716+#endif
69717+ memset(buf, 0, PAGE_SIZE);
69718+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
69719+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
69720+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
69721+ } else if (current->signal->curr_ip) {
69722+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
69723+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
69724+ } else if (gr_acl_is_enabled()) {
69725+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
69726+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
69727+ } else {
69728+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
69729+ strcpy(buf, fmt);
69730+ }
69731+
69732+ return NO_FLOODING;
69733+}
69734+
69735+static void gr_log_middle(int audit, const char *msg, va_list ap)
69736+ __attribute__ ((format (printf, 2, 0)));
69737+
69738+static void gr_log_middle(int audit, const char *msg, va_list ap)
69739+{
69740+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69741+ unsigned int len = strlen(buf);
69742+
69743+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
69744+
69745+ return;
69746+}
69747+
69748+static void gr_log_middle_varargs(int audit, const char *msg, ...)
69749+ __attribute__ ((format (printf, 2, 3)));
69750+
69751+static void gr_log_middle_varargs(int audit, const char *msg, ...)
69752+{
69753+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69754+ unsigned int len = strlen(buf);
69755+ va_list ap;
69756+
69757+ va_start(ap, msg);
69758+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
69759+ va_end(ap);
69760+
69761+ return;
69762+}
69763+
69764+static void gr_log_end(int audit, int append_default)
69765+{
69766+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69767+ if (append_default) {
69768+ struct task_struct *task = current;
69769+ struct task_struct *parent = task->real_parent;
69770+ const struct cred *cred = __task_cred(task);
69771+ const struct cred *pcred = __task_cred(parent);
69772+ unsigned int len = strlen(buf);
69773+
69774+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69775+ }
69776+
69777+ printk("%s\n", buf);
69778+
69779+ return;
69780+}
69781+
69782+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
69783+{
69784+ int logtype;
69785+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
69786+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
69787+ void *voidptr = NULL;
69788+ int num1 = 0, num2 = 0;
69789+ unsigned long ulong1 = 0, ulong2 = 0;
69790+ struct dentry *dentry = NULL;
69791+ struct vfsmount *mnt = NULL;
69792+ struct file *file = NULL;
69793+ struct task_struct *task = NULL;
69794+ struct vm_area_struct *vma = NULL;
69795+ const struct cred *cred, *pcred;
69796+ va_list ap;
69797+
69798+ BEGIN_LOCKS(audit);
69799+ logtype = gr_log_start(audit);
69800+ if (logtype == FLOODING) {
69801+ END_LOCKS(audit);
69802+ return;
69803+ }
69804+ va_start(ap, argtypes);
69805+ switch (argtypes) {
69806+ case GR_TTYSNIFF:
69807+ task = va_arg(ap, struct task_struct *);
69808+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
69809+ break;
69810+ case GR_SYSCTL_HIDDEN:
69811+ str1 = va_arg(ap, char *);
69812+ gr_log_middle_varargs(audit, msg, result, str1);
69813+ break;
69814+ case GR_RBAC:
69815+ dentry = va_arg(ap, struct dentry *);
69816+ mnt = va_arg(ap, struct vfsmount *);
69817+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
69818+ break;
69819+ case GR_RBAC_STR:
69820+ dentry = va_arg(ap, struct dentry *);
69821+ mnt = va_arg(ap, struct vfsmount *);
69822+ str1 = va_arg(ap, char *);
69823+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
69824+ break;
69825+ case GR_STR_RBAC:
69826+ str1 = va_arg(ap, char *);
69827+ dentry = va_arg(ap, struct dentry *);
69828+ mnt = va_arg(ap, struct vfsmount *);
69829+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
69830+ break;
69831+ case GR_RBAC_MODE2:
69832+ dentry = va_arg(ap, struct dentry *);
69833+ mnt = va_arg(ap, struct vfsmount *);
69834+ str1 = va_arg(ap, char *);
69835+ str2 = va_arg(ap, char *);
69836+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
69837+ break;
69838+ case GR_RBAC_MODE3:
69839+ dentry = va_arg(ap, struct dentry *);
69840+ mnt = va_arg(ap, struct vfsmount *);
69841+ str1 = va_arg(ap, char *);
69842+ str2 = va_arg(ap, char *);
69843+ str3 = va_arg(ap, char *);
69844+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
69845+ break;
69846+ case GR_FILENAME:
69847+ dentry = va_arg(ap, struct dentry *);
69848+ mnt = va_arg(ap, struct vfsmount *);
69849+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
69850+ break;
69851+ case GR_STR_FILENAME:
69852+ str1 = va_arg(ap, char *);
69853+ dentry = va_arg(ap, struct dentry *);
69854+ mnt = va_arg(ap, struct vfsmount *);
69855+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
69856+ break;
69857+ case GR_FILENAME_STR:
69858+ dentry = va_arg(ap, struct dentry *);
69859+ mnt = va_arg(ap, struct vfsmount *);
69860+ str1 = va_arg(ap, char *);
69861+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
69862+ break;
69863+ case GR_FILENAME_TWO_INT:
69864+ dentry = va_arg(ap, struct dentry *);
69865+ mnt = va_arg(ap, struct vfsmount *);
69866+ num1 = va_arg(ap, int);
69867+ num2 = va_arg(ap, int);
69868+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
69869+ break;
69870+ case GR_FILENAME_TWO_INT_STR:
69871+ dentry = va_arg(ap, struct dentry *);
69872+ mnt = va_arg(ap, struct vfsmount *);
69873+ num1 = va_arg(ap, int);
69874+ num2 = va_arg(ap, int);
69875+ str1 = va_arg(ap, char *);
69876+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
69877+ break;
69878+ case GR_TEXTREL:
69879+ file = va_arg(ap, struct file *);
69880+ ulong1 = va_arg(ap, unsigned long);
69881+ ulong2 = va_arg(ap, unsigned long);
69882+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
69883+ break;
69884+ case GR_PTRACE:
69885+ task = va_arg(ap, struct task_struct *);
69886+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
69887+ break;
69888+ case GR_RESOURCE:
69889+ task = va_arg(ap, struct task_struct *);
69890+ cred = __task_cred(task);
69891+ pcred = __task_cred(task->real_parent);
69892+ ulong1 = va_arg(ap, unsigned long);
69893+ str1 = va_arg(ap, char *);
69894+ ulong2 = va_arg(ap, unsigned long);
69895+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69896+ break;
69897+ case GR_CAP:
69898+ task = va_arg(ap, struct task_struct *);
69899+ cred = __task_cred(task);
69900+ pcred = __task_cred(task->real_parent);
69901+ str1 = va_arg(ap, char *);
69902+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69903+ break;
69904+ case GR_SIG:
69905+ str1 = va_arg(ap, char *);
69906+ voidptr = va_arg(ap, void *);
69907+ gr_log_middle_varargs(audit, msg, str1, voidptr);
69908+ break;
69909+ case GR_SIG2:
69910+ task = va_arg(ap, struct task_struct *);
69911+ cred = __task_cred(task);
69912+ pcred = __task_cred(task->real_parent);
69913+ num1 = va_arg(ap, int);
69914+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69915+ break;
69916+ case GR_CRASH1:
69917+ task = va_arg(ap, struct task_struct *);
69918+ cred = __task_cred(task);
69919+ pcred = __task_cred(task->real_parent);
69920+ ulong1 = va_arg(ap, unsigned long);
69921+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
69922+ break;
69923+ case GR_CRASH2:
69924+ task = va_arg(ap, struct task_struct *);
69925+ cred = __task_cred(task);
69926+ pcred = __task_cred(task->real_parent);
69927+ ulong1 = va_arg(ap, unsigned long);
69928+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
69929+ break;
69930+ case GR_RWXMAP:
69931+ file = va_arg(ap, struct file *);
69932+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
69933+ break;
69934+ case GR_RWXMAPVMA:
69935+ vma = va_arg(ap, struct vm_area_struct *);
69936+ if (vma->vm_file)
69937+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
69938+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
69939+ str1 = "<stack>";
69940+ else if (vma->vm_start <= current->mm->brk &&
69941+ vma->vm_end >= current->mm->start_brk)
69942+ str1 = "<heap>";
69943+ else
69944+ str1 = "<anonymous mapping>";
69945+ gr_log_middle_varargs(audit, msg, str1);
69946+ break;
69947+ case GR_PSACCT:
69948+ {
69949+ unsigned int wday, cday;
69950+ __u8 whr, chr;
69951+ __u8 wmin, cmin;
69952+ __u8 wsec, csec;
69953+ char cur_tty[64] = { 0 };
69954+ char parent_tty[64] = { 0 };
69955+
69956+ task = va_arg(ap, struct task_struct *);
69957+ wday = va_arg(ap, unsigned int);
69958+ cday = va_arg(ap, unsigned int);
69959+ whr = va_arg(ap, int);
69960+ chr = va_arg(ap, int);
69961+ wmin = va_arg(ap, int);
69962+ cmin = va_arg(ap, int);
69963+ wsec = va_arg(ap, int);
69964+ csec = va_arg(ap, int);
69965+ ulong1 = va_arg(ap, unsigned long);
69966+ cred = __task_cred(task);
69967+ pcred = __task_cred(task->real_parent);
69968+
69969+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69970+ }
69971+ break;
69972+ default:
69973+ gr_log_middle(audit, msg, ap);
69974+ }
69975+ va_end(ap);
69976+ // these don't need DEFAULTSECARGS printed on the end
69977+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
69978+ gr_log_end(audit, 0);
69979+ else
69980+ gr_log_end(audit, 1);
69981+ END_LOCKS(audit);
69982+}
69983diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
69984new file mode 100644
69985index 0000000..f536303
69986--- /dev/null
69987+++ b/grsecurity/grsec_mem.c
69988@@ -0,0 +1,40 @@
69989+#include <linux/kernel.h>
69990+#include <linux/sched.h>
69991+#include <linux/mm.h>
69992+#include <linux/mman.h>
69993+#include <linux/grinternal.h>
69994+
69995+void
69996+gr_handle_ioperm(void)
69997+{
69998+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
69999+ return;
70000+}
70001+
70002+void
70003+gr_handle_iopl(void)
70004+{
70005+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
70006+ return;
70007+}
70008+
70009+void
70010+gr_handle_mem_readwrite(u64 from, u64 to)
70011+{
70012+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
70013+ return;
70014+}
70015+
70016+void
70017+gr_handle_vm86(void)
70018+{
70019+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
70020+ return;
70021+}
70022+
70023+void
70024+gr_log_badprocpid(const char *entry)
70025+{
70026+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
70027+ return;
70028+}
70029diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
70030new file mode 100644
70031index 0000000..2131422
70032--- /dev/null
70033+++ b/grsecurity/grsec_mount.c
70034@@ -0,0 +1,62 @@
70035+#include <linux/kernel.h>
70036+#include <linux/sched.h>
70037+#include <linux/mount.h>
70038+#include <linux/grsecurity.h>
70039+#include <linux/grinternal.h>
70040+
70041+void
70042+gr_log_remount(const char *devname, const int retval)
70043+{
70044+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70045+ if (grsec_enable_mount && (retval >= 0))
70046+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
70047+#endif
70048+ return;
70049+}
70050+
70051+void
70052+gr_log_unmount(const char *devname, const int retval)
70053+{
70054+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70055+ if (grsec_enable_mount && (retval >= 0))
70056+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
70057+#endif
70058+ return;
70059+}
70060+
70061+void
70062+gr_log_mount(const char *from, const char *to, const int retval)
70063+{
70064+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70065+ if (grsec_enable_mount && (retval >= 0))
70066+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
70067+#endif
70068+ return;
70069+}
70070+
70071+int
70072+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
70073+{
70074+#ifdef CONFIG_GRKERNSEC_ROFS
70075+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
70076+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
70077+ return -EPERM;
70078+ } else
70079+ return 0;
70080+#endif
70081+ return 0;
70082+}
70083+
70084+int
70085+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
70086+{
70087+#ifdef CONFIG_GRKERNSEC_ROFS
70088+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
70089+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
70090+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
70091+ return -EPERM;
70092+ } else
70093+ return 0;
70094+#endif
70095+ return 0;
70096+}
70097diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
70098new file mode 100644
70099index 0000000..6ee9d50
70100--- /dev/null
70101+++ b/grsecurity/grsec_pax.c
70102@@ -0,0 +1,45 @@
70103+#include <linux/kernel.h>
70104+#include <linux/sched.h>
70105+#include <linux/mm.h>
70106+#include <linux/file.h>
70107+#include <linux/grinternal.h>
70108+#include <linux/grsecurity.h>
70109+
70110+void
70111+gr_log_textrel(struct vm_area_struct * vma)
70112+{
70113+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70114+ if (grsec_enable_log_rwxmaps)
70115+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
70116+#endif
70117+ return;
70118+}
70119+
70120+void gr_log_ptgnustack(struct file *file)
70121+{
70122+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70123+ if (grsec_enable_log_rwxmaps)
70124+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
70125+#endif
70126+ return;
70127+}
70128+
70129+void
70130+gr_log_rwxmmap(struct file *file)
70131+{
70132+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70133+ if (grsec_enable_log_rwxmaps)
70134+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
70135+#endif
70136+ return;
70137+}
70138+
70139+void
70140+gr_log_rwxmprotect(struct vm_area_struct *vma)
70141+{
70142+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70143+ if (grsec_enable_log_rwxmaps)
70144+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
70145+#endif
70146+ return;
70147+}
70148diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
70149new file mode 100644
70150index 0000000..f7f29aa
70151--- /dev/null
70152+++ b/grsecurity/grsec_ptrace.c
70153@@ -0,0 +1,30 @@
70154+#include <linux/kernel.h>
70155+#include <linux/sched.h>
70156+#include <linux/grinternal.h>
70157+#include <linux/security.h>
70158+
70159+void
70160+gr_audit_ptrace(struct task_struct *task)
70161+{
70162+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
70163+ if (grsec_enable_audit_ptrace)
70164+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
70165+#endif
70166+ return;
70167+}
70168+
70169+int
70170+gr_ptrace_readexec(struct file *file, int unsafe_flags)
70171+{
70172+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
70173+ const struct dentry *dentry = file->f_path.dentry;
70174+ const struct vfsmount *mnt = file->f_path.mnt;
70175+
70176+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
70177+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
70178+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
70179+ return -EACCES;
70180+ }
70181+#endif
70182+ return 0;
70183+}
70184diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
70185new file mode 100644
70186index 0000000..4e29cc7
70187--- /dev/null
70188+++ b/grsecurity/grsec_sig.c
70189@@ -0,0 +1,246 @@
70190+#include <linux/kernel.h>
70191+#include <linux/sched.h>
70192+#include <linux/fs.h>
70193+#include <linux/delay.h>
70194+#include <linux/grsecurity.h>
70195+#include <linux/grinternal.h>
70196+#include <linux/hardirq.h>
70197+
70198+char *signames[] = {
70199+ [SIGSEGV] = "Segmentation fault",
70200+ [SIGILL] = "Illegal instruction",
70201+ [SIGABRT] = "Abort",
70202+ [SIGBUS] = "Invalid alignment/Bus error"
70203+};
70204+
70205+void
70206+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
70207+{
70208+#ifdef CONFIG_GRKERNSEC_SIGNAL
70209+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
70210+ (sig == SIGABRT) || (sig == SIGBUS))) {
70211+ if (task_pid_nr(t) == task_pid_nr(current)) {
70212+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
70213+ } else {
70214+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
70215+ }
70216+ }
70217+#endif
70218+ return;
70219+}
70220+
70221+int
70222+gr_handle_signal(const struct task_struct *p, const int sig)
70223+{
70224+#ifdef CONFIG_GRKERNSEC
70225+ /* ignore the 0 signal for protected task checks */
70226+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
70227+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
70228+ return -EPERM;
70229+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
70230+ return -EPERM;
70231+ }
70232+#endif
70233+ return 0;
70234+}
70235+
70236+#ifdef CONFIG_GRKERNSEC
70237+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
70238+
70239+int gr_fake_force_sig(int sig, struct task_struct *t)
70240+{
70241+ unsigned long int flags;
70242+ int ret, blocked, ignored;
70243+ struct k_sigaction *action;
70244+
70245+ spin_lock_irqsave(&t->sighand->siglock, flags);
70246+ action = &t->sighand->action[sig-1];
70247+ ignored = action->sa.sa_handler == SIG_IGN;
70248+ blocked = sigismember(&t->blocked, sig);
70249+ if (blocked || ignored) {
70250+ action->sa.sa_handler = SIG_DFL;
70251+ if (blocked) {
70252+ sigdelset(&t->blocked, sig);
70253+ recalc_sigpending_and_wake(t);
70254+ }
70255+ }
70256+ if (action->sa.sa_handler == SIG_DFL)
70257+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
70258+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
70259+
70260+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
70261+
70262+ return ret;
70263+}
70264+#endif
70265+
70266+#ifdef CONFIG_GRKERNSEC_BRUTE
70267+#define GR_USER_BAN_TIME (15 * 60)
70268+#define GR_DAEMON_BRUTE_TIME (30 * 60)
70269+
70270+static int __get_dumpable(unsigned long mm_flags)
70271+{
70272+ int ret;
70273+
70274+ ret = mm_flags & MMF_DUMPABLE_MASK;
70275+ return (ret >= 2) ? 2 : ret;
70276+}
70277+#endif
70278+
70279+void gr_handle_brute_attach(unsigned long mm_flags)
70280+{
70281+#ifdef CONFIG_GRKERNSEC_BRUTE
70282+ struct task_struct *p = current;
70283+ kuid_t uid = GLOBAL_ROOT_UID;
70284+ int daemon = 0;
70285+
70286+ if (!grsec_enable_brute)
70287+ return;
70288+
70289+ rcu_read_lock();
70290+ read_lock(&tasklist_lock);
70291+ read_lock(&grsec_exec_file_lock);
70292+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
70293+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
70294+ p->real_parent->brute = 1;
70295+ daemon = 1;
70296+ } else {
70297+ const struct cred *cred = __task_cred(p), *cred2;
70298+ struct task_struct *tsk, *tsk2;
70299+
70300+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
70301+ struct user_struct *user;
70302+
70303+ uid = cred->uid;
70304+
70305+ /* this is put upon execution past expiration */
70306+ user = find_user(uid);
70307+ if (user == NULL)
70308+ goto unlock;
70309+ user->suid_banned = 1;
70310+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
70311+ if (user->suid_ban_expires == ~0UL)
70312+ user->suid_ban_expires--;
70313+
70314+ /* only kill other threads of the same binary, from the same user */
70315+ do_each_thread(tsk2, tsk) {
70316+ cred2 = __task_cred(tsk);
70317+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
70318+ gr_fake_force_sig(SIGKILL, tsk);
70319+ } while_each_thread(tsk2, tsk);
70320+ }
70321+ }
70322+unlock:
70323+ read_unlock(&grsec_exec_file_lock);
70324+ read_unlock(&tasklist_lock);
70325+ rcu_read_unlock();
70326+
70327+ if (gr_is_global_nonroot(uid))
70328+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
70329+ else if (daemon)
70330+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
70331+
70332+#endif
70333+ return;
70334+}
70335+
70336+void gr_handle_brute_check(void)
70337+{
70338+#ifdef CONFIG_GRKERNSEC_BRUTE
70339+ struct task_struct *p = current;
70340+
70341+ if (unlikely(p->brute)) {
70342+ if (!grsec_enable_brute)
70343+ p->brute = 0;
70344+ else if (time_before(get_seconds(), p->brute_expires))
70345+ msleep(30 * 1000);
70346+ }
70347+#endif
70348+ return;
70349+}
70350+
70351+void gr_handle_kernel_exploit(void)
70352+{
70353+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70354+ const struct cred *cred;
70355+ struct task_struct *tsk, *tsk2;
70356+ struct user_struct *user;
70357+ kuid_t uid;
70358+
70359+ if (in_irq() || in_serving_softirq() || in_nmi())
70360+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
70361+
70362+ uid = current_uid();
70363+
70364+ if (gr_is_global_root(uid))
70365+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
70366+ else {
70367+ /* kill all the processes of this user, hold a reference
70368+ to their creds struct, and prevent them from creating
70369+ another process until system reset
70370+ */
70371+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
70372+ GR_GLOBAL_UID(uid));
70373+ /* we intentionally leak this ref */
70374+ user = get_uid(current->cred->user);
70375+ if (user)
70376+ user->kernel_banned = 1;
70377+
70378+ /* kill all processes of this user */
70379+ read_lock(&tasklist_lock);
70380+ do_each_thread(tsk2, tsk) {
70381+ cred = __task_cred(tsk);
70382+ if (uid_eq(cred->uid, uid))
70383+ gr_fake_force_sig(SIGKILL, tsk);
70384+ } while_each_thread(tsk2, tsk);
70385+ read_unlock(&tasklist_lock);
70386+ }
70387+#endif
70388+}
70389+
70390+#ifdef CONFIG_GRKERNSEC_BRUTE
70391+static bool suid_ban_expired(struct user_struct *user)
70392+{
70393+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
70394+ user->suid_banned = 0;
70395+ user->suid_ban_expires = 0;
70396+ free_uid(user);
70397+ return true;
70398+ }
70399+
70400+ return false;
70401+}
70402+#endif
70403+
70404+int gr_process_kernel_exec_ban(void)
70405+{
70406+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70407+ if (unlikely(current->cred->user->kernel_banned))
70408+ return -EPERM;
70409+#endif
70410+ return 0;
70411+}
70412+
70413+int gr_process_kernel_setuid_ban(struct user_struct *user)
70414+{
70415+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70416+ if (unlikely(user->kernel_banned))
70417+ gr_fake_force_sig(SIGKILL, current);
70418+#endif
70419+ return 0;
70420+}
70421+
70422+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
70423+{
70424+#ifdef CONFIG_GRKERNSEC_BRUTE
70425+ struct user_struct *user = current->cred->user;
70426+ if (unlikely(user->suid_banned)) {
70427+ if (suid_ban_expired(user))
70428+ return 0;
70429+ /* disallow execution of suid binaries only */
70430+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
70431+ return -EPERM;
70432+ }
70433+#endif
70434+ return 0;
70435+}
70436diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
70437new file mode 100644
70438index 0000000..4030d57
70439--- /dev/null
70440+++ b/grsecurity/grsec_sock.c
70441@@ -0,0 +1,244 @@
70442+#include <linux/kernel.h>
70443+#include <linux/module.h>
70444+#include <linux/sched.h>
70445+#include <linux/file.h>
70446+#include <linux/net.h>
70447+#include <linux/in.h>
70448+#include <linux/ip.h>
70449+#include <net/sock.h>
70450+#include <net/inet_sock.h>
70451+#include <linux/grsecurity.h>
70452+#include <linux/grinternal.h>
70453+#include <linux/gracl.h>
70454+
70455+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
70456+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
70457+
70458+EXPORT_SYMBOL(gr_search_udp_recvmsg);
70459+EXPORT_SYMBOL(gr_search_udp_sendmsg);
70460+
70461+#ifdef CONFIG_UNIX_MODULE
70462+EXPORT_SYMBOL(gr_acl_handle_unix);
70463+EXPORT_SYMBOL(gr_acl_handle_mknod);
70464+EXPORT_SYMBOL(gr_handle_chroot_unix);
70465+EXPORT_SYMBOL(gr_handle_create);
70466+#endif
70467+
70468+#ifdef CONFIG_GRKERNSEC
70469+#define gr_conn_table_size 32749
70470+struct conn_table_entry {
70471+ struct conn_table_entry *next;
70472+ struct signal_struct *sig;
70473+};
70474+
70475+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
70476+DEFINE_SPINLOCK(gr_conn_table_lock);
70477+
70478+extern const char * gr_socktype_to_name(unsigned char type);
70479+extern const char * gr_proto_to_name(unsigned char proto);
70480+extern const char * gr_sockfamily_to_name(unsigned char family);
70481+
70482+static __inline__ int
70483+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
70484+{
70485+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
70486+}
70487+
70488+static __inline__ int
70489+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
70490+ __u16 sport, __u16 dport)
70491+{
70492+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
70493+ sig->gr_sport == sport && sig->gr_dport == dport))
70494+ return 1;
70495+ else
70496+ return 0;
70497+}
70498+
70499+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
70500+{
70501+ struct conn_table_entry **match;
70502+ unsigned int index;
70503+
70504+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
70505+ sig->gr_sport, sig->gr_dport,
70506+ gr_conn_table_size);
70507+
70508+ newent->sig = sig;
70509+
70510+ match = &gr_conn_table[index];
70511+ newent->next = *match;
70512+ *match = newent;
70513+
70514+ return;
70515+}
70516+
70517+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
70518+{
70519+ struct conn_table_entry *match, *last = NULL;
70520+ unsigned int index;
70521+
70522+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
70523+ sig->gr_sport, sig->gr_dport,
70524+ gr_conn_table_size);
70525+
70526+ match = gr_conn_table[index];
70527+ while (match && !conn_match(match->sig,
70528+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
70529+ sig->gr_dport)) {
70530+ last = match;
70531+ match = match->next;
70532+ }
70533+
70534+ if (match) {
70535+ if (last)
70536+ last->next = match->next;
70537+ else
70538+ gr_conn_table[index] = NULL;
70539+ kfree(match);
70540+ }
70541+
70542+ return;
70543+}
70544+
70545+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
70546+ __u16 sport, __u16 dport)
70547+{
70548+ struct conn_table_entry *match;
70549+ unsigned int index;
70550+
70551+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
70552+
70553+ match = gr_conn_table[index];
70554+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
70555+ match = match->next;
70556+
70557+ if (match)
70558+ return match->sig;
70559+ else
70560+ return NULL;
70561+}
70562+
70563+#endif
70564+
70565+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
70566+{
70567+#ifdef CONFIG_GRKERNSEC
70568+ struct signal_struct *sig = task->signal;
70569+ struct conn_table_entry *newent;
70570+
70571+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
70572+ if (newent == NULL)
70573+ return;
70574+ /* no bh lock needed since we are called with bh disabled */
70575+ spin_lock(&gr_conn_table_lock);
70576+ gr_del_task_from_ip_table_nolock(sig);
70577+ sig->gr_saddr = inet->inet_rcv_saddr;
70578+ sig->gr_daddr = inet->inet_daddr;
70579+ sig->gr_sport = inet->inet_sport;
70580+ sig->gr_dport = inet->inet_dport;
70581+ gr_add_to_task_ip_table_nolock(sig, newent);
70582+ spin_unlock(&gr_conn_table_lock);
70583+#endif
70584+ return;
70585+}
70586+
70587+void gr_del_task_from_ip_table(struct task_struct *task)
70588+{
70589+#ifdef CONFIG_GRKERNSEC
70590+ spin_lock_bh(&gr_conn_table_lock);
70591+ gr_del_task_from_ip_table_nolock(task->signal);
70592+ spin_unlock_bh(&gr_conn_table_lock);
70593+#endif
70594+ return;
70595+}
70596+
70597+void
70598+gr_attach_curr_ip(const struct sock *sk)
70599+{
70600+#ifdef CONFIG_GRKERNSEC
70601+ struct signal_struct *p, *set;
70602+ const struct inet_sock *inet = inet_sk(sk);
70603+
70604+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
70605+ return;
70606+
70607+ set = current->signal;
70608+
70609+ spin_lock_bh(&gr_conn_table_lock);
70610+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
70611+ inet->inet_dport, inet->inet_sport);
70612+ if (unlikely(p != NULL)) {
70613+ set->curr_ip = p->curr_ip;
70614+ set->used_accept = 1;
70615+ gr_del_task_from_ip_table_nolock(p);
70616+ spin_unlock_bh(&gr_conn_table_lock);
70617+ return;
70618+ }
70619+ spin_unlock_bh(&gr_conn_table_lock);
70620+
70621+ set->curr_ip = inet->inet_daddr;
70622+ set->used_accept = 1;
70623+#endif
70624+ return;
70625+}
70626+
70627+int
70628+gr_handle_sock_all(const int family, const int type, const int protocol)
70629+{
70630+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
70631+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
70632+ (family != AF_UNIX)) {
70633+ if (family == AF_INET)
70634+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
70635+ else
70636+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
70637+ return -EACCES;
70638+ }
70639+#endif
70640+ return 0;
70641+}
70642+
70643+int
70644+gr_handle_sock_server(const struct sockaddr *sck)
70645+{
70646+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
70647+ if (grsec_enable_socket_server &&
70648+ in_group_p(grsec_socket_server_gid) &&
70649+ sck && (sck->sa_family != AF_UNIX) &&
70650+ (sck->sa_family != AF_LOCAL)) {
70651+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
70652+ return -EACCES;
70653+ }
70654+#endif
70655+ return 0;
70656+}
70657+
70658+int
70659+gr_handle_sock_server_other(const struct sock *sck)
70660+{
70661+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
70662+ if (grsec_enable_socket_server &&
70663+ in_group_p(grsec_socket_server_gid) &&
70664+ sck && (sck->sk_family != AF_UNIX) &&
70665+ (sck->sk_family != AF_LOCAL)) {
70666+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
70667+ return -EACCES;
70668+ }
70669+#endif
70670+ return 0;
70671+}
70672+
70673+int
70674+gr_handle_sock_client(const struct sockaddr *sck)
70675+{
70676+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
70677+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
70678+ sck && (sck->sa_family != AF_UNIX) &&
70679+ (sck->sa_family != AF_LOCAL)) {
70680+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
70681+ return -EACCES;
70682+ }
70683+#endif
70684+ return 0;
70685+}
70686diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
70687new file mode 100644
e2b79cd1 70688index 0000000..301c665
bb5f0bf8
AF
70689--- /dev/null
70690+++ b/grsecurity/grsec_sysctl.c
e2b79cd1 70691@@ -0,0 +1,471 @@
bb5f0bf8
AF
70692+#include <linux/kernel.h>
70693+#include <linux/sched.h>
70694+#include <linux/sysctl.h>
70695+#include <linux/grsecurity.h>
70696+#include <linux/grinternal.h>
70697+
70698+int
70699+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
70700+{
70701+#ifdef CONFIG_GRKERNSEC_SYSCTL
70702+ if (dirname == NULL || name == NULL)
70703+ return 0;
70704+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
70705+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
70706+ return -EACCES;
70707+ }
70708+#endif
70709+ return 0;
70710+}
70711+
70712+#ifdef CONFIG_GRKERNSEC_ROFS
70713+static int __maybe_unused one = 1;
70714+#endif
70715+
70716+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
70717+struct ctl_table grsecurity_table[] = {
70718+#ifdef CONFIG_GRKERNSEC_SYSCTL
70719+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
70720+#ifdef CONFIG_GRKERNSEC_IO
70721+ {
70722+ .procname = "disable_priv_io",
70723+ .data = &grsec_disable_privio,
70724+ .maxlen = sizeof(int),
70725+ .mode = 0600,
70726+ .proc_handler = &proc_dointvec,
70727+ },
70728+#endif
70729+#endif
70730+#ifdef CONFIG_GRKERNSEC_LINK
70731+ {
70732+ .procname = "linking_restrictions",
70733+ .data = &grsec_enable_link,
70734+ .maxlen = sizeof(int),
70735+ .mode = 0600,
70736+ .proc_handler = &proc_dointvec,
70737+ },
70738+#endif
70739+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
70740+ {
70741+ .procname = "enforce_symlinksifowner",
70742+ .data = &grsec_enable_symlinkown,
70743+ .maxlen = sizeof(int),
70744+ .mode = 0600,
70745+ .proc_handler = &proc_dointvec,
70746+ },
70747+ {
70748+ .procname = "symlinkown_gid",
70749+ .data = &grsec_symlinkown_gid,
70750+ .maxlen = sizeof(int),
70751+ .mode = 0600,
70752+ .proc_handler = &proc_dointvec,
70753+ },
70754+#endif
70755+#ifdef CONFIG_GRKERNSEC_BRUTE
70756+ {
70757+ .procname = "deter_bruteforce",
70758+ .data = &grsec_enable_brute,
70759+ .maxlen = sizeof(int),
70760+ .mode = 0600,
70761+ .proc_handler = &proc_dointvec,
70762+ },
70763+#endif
70764+#ifdef CONFIG_GRKERNSEC_FIFO
70765+ {
70766+ .procname = "fifo_restrictions",
70767+ .data = &grsec_enable_fifo,
70768+ .maxlen = sizeof(int),
70769+ .mode = 0600,
70770+ .proc_handler = &proc_dointvec,
70771+ },
70772+#endif
70773+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
70774+ {
70775+ .procname = "ptrace_readexec",
70776+ .data = &grsec_enable_ptrace_readexec,
70777+ .maxlen = sizeof(int),
70778+ .mode = 0600,
70779+ .proc_handler = &proc_dointvec,
70780+ },
70781+#endif
70782+#ifdef CONFIG_GRKERNSEC_SETXID
70783+ {
70784+ .procname = "consistent_setxid",
70785+ .data = &grsec_enable_setxid,
70786+ .maxlen = sizeof(int),
70787+ .mode = 0600,
70788+ .proc_handler = &proc_dointvec,
70789+ },
70790+#endif
70791+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70792+ {
70793+ .procname = "ip_blackhole",
70794+ .data = &grsec_enable_blackhole,
70795+ .maxlen = sizeof(int),
70796+ .mode = 0600,
70797+ .proc_handler = &proc_dointvec,
70798+ },
70799+ {
70800+ .procname = "lastack_retries",
70801+ .data = &grsec_lastack_retries,
70802+ .maxlen = sizeof(int),
70803+ .mode = 0600,
70804+ .proc_handler = &proc_dointvec,
70805+ },
70806+#endif
70807+#ifdef CONFIG_GRKERNSEC_EXECLOG
70808+ {
70809+ .procname = "exec_logging",
70810+ .data = &grsec_enable_execlog,
70811+ .maxlen = sizeof(int),
70812+ .mode = 0600,
70813+ .proc_handler = &proc_dointvec,
70814+ },
70815+#endif
70816+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70817+ {
70818+ .procname = "rwxmap_logging",
70819+ .data = &grsec_enable_log_rwxmaps,
70820+ .maxlen = sizeof(int),
70821+ .mode = 0600,
70822+ .proc_handler = &proc_dointvec,
70823+ },
70824+#endif
70825+#ifdef CONFIG_GRKERNSEC_SIGNAL
70826+ {
70827+ .procname = "signal_logging",
70828+ .data = &grsec_enable_signal,
70829+ .maxlen = sizeof(int),
70830+ .mode = 0600,
70831+ .proc_handler = &proc_dointvec,
70832+ },
70833+#endif
70834+#ifdef CONFIG_GRKERNSEC_FORKFAIL
70835+ {
70836+ .procname = "forkfail_logging",
70837+ .data = &grsec_enable_forkfail,
70838+ .maxlen = sizeof(int),
70839+ .mode = 0600,
70840+ .proc_handler = &proc_dointvec,
70841+ },
70842+#endif
70843+#ifdef CONFIG_GRKERNSEC_TIME
70844+ {
70845+ .procname = "timechange_logging",
70846+ .data = &grsec_enable_time,
70847+ .maxlen = sizeof(int),
70848+ .mode = 0600,
70849+ .proc_handler = &proc_dointvec,
70850+ },
70851+#endif
70852+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
70853+ {
70854+ .procname = "chroot_deny_shmat",
70855+ .data = &grsec_enable_chroot_shmat,
70856+ .maxlen = sizeof(int),
70857+ .mode = 0600,
70858+ .proc_handler = &proc_dointvec,
70859+ },
70860+#endif
70861+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70862+ {
70863+ .procname = "chroot_deny_unix",
70864+ .data = &grsec_enable_chroot_unix,
70865+ .maxlen = sizeof(int),
70866+ .mode = 0600,
70867+ .proc_handler = &proc_dointvec,
70868+ },
70869+#endif
70870+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
70871+ {
70872+ .procname = "chroot_deny_mount",
70873+ .data = &grsec_enable_chroot_mount,
70874+ .maxlen = sizeof(int),
70875+ .mode = 0600,
70876+ .proc_handler = &proc_dointvec,
70877+ },
70878+#endif
70879+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
70880+ {
70881+ .procname = "chroot_deny_fchdir",
70882+ .data = &grsec_enable_chroot_fchdir,
70883+ .maxlen = sizeof(int),
70884+ .mode = 0600,
70885+ .proc_handler = &proc_dointvec,
70886+ },
70887+#endif
70888+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
70889+ {
70890+ .procname = "chroot_deny_chroot",
70891+ .data = &grsec_enable_chroot_double,
70892+ .maxlen = sizeof(int),
70893+ .mode = 0600,
70894+ .proc_handler = &proc_dointvec,
70895+ },
70896+#endif
70897+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
70898+ {
70899+ .procname = "chroot_deny_pivot",
70900+ .data = &grsec_enable_chroot_pivot,
70901+ .maxlen = sizeof(int),
70902+ .mode = 0600,
70903+ .proc_handler = &proc_dointvec,
70904+ },
70905+#endif
70906+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
70907+ {
70908+ .procname = "chroot_enforce_chdir",
70909+ .data = &grsec_enable_chroot_chdir,
70910+ .maxlen = sizeof(int),
70911+ .mode = 0600,
70912+ .proc_handler = &proc_dointvec,
70913+ },
70914+#endif
70915+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
70916+ {
70917+ .procname = "chroot_deny_chmod",
70918+ .data = &grsec_enable_chroot_chmod,
70919+ .maxlen = sizeof(int),
70920+ .mode = 0600,
70921+ .proc_handler = &proc_dointvec,
70922+ },
70923+#endif
70924+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
70925+ {
70926+ .procname = "chroot_deny_mknod",
70927+ .data = &grsec_enable_chroot_mknod,
70928+ .maxlen = sizeof(int),
70929+ .mode = 0600,
70930+ .proc_handler = &proc_dointvec,
70931+ },
70932+#endif
70933+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
70934+ {
70935+ .procname = "chroot_restrict_nice",
70936+ .data = &grsec_enable_chroot_nice,
70937+ .maxlen = sizeof(int),
70938+ .mode = 0600,
70939+ .proc_handler = &proc_dointvec,
70940+ },
70941+#endif
70942+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
70943+ {
70944+ .procname = "chroot_execlog",
70945+ .data = &grsec_enable_chroot_execlog,
70946+ .maxlen = sizeof(int),
70947+ .mode = 0600,
70948+ .proc_handler = &proc_dointvec,
70949+ },
70950+#endif
70951+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70952+ {
70953+ .procname = "chroot_caps",
70954+ .data = &grsec_enable_chroot_caps,
70955+ .maxlen = sizeof(int),
70956+ .mode = 0600,
70957+ .proc_handler = &proc_dointvec,
70958+ },
70959+#endif
70960+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
70961+ {
70962+ .procname = "chroot_deny_sysctl",
70963+ .data = &grsec_enable_chroot_sysctl,
70964+ .maxlen = sizeof(int),
70965+ .mode = 0600,
70966+ .proc_handler = &proc_dointvec,
70967+ },
70968+#endif
70969+#ifdef CONFIG_GRKERNSEC_TPE
70970+ {
70971+ .procname = "tpe",
70972+ .data = &grsec_enable_tpe,
70973+ .maxlen = sizeof(int),
70974+ .mode = 0600,
70975+ .proc_handler = &proc_dointvec,
70976+ },
70977+ {
70978+ .procname = "tpe_gid",
70979+ .data = &grsec_tpe_gid,
70980+ .maxlen = sizeof(int),
70981+ .mode = 0600,
70982+ .proc_handler = &proc_dointvec,
70983+ },
70984+#endif
70985+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
70986+ {
70987+ .procname = "tpe_invert",
70988+ .data = &grsec_enable_tpe_invert,
70989+ .maxlen = sizeof(int),
70990+ .mode = 0600,
70991+ .proc_handler = &proc_dointvec,
70992+ },
70993+#endif
70994+#ifdef CONFIG_GRKERNSEC_TPE_ALL
70995+ {
70996+ .procname = "tpe_restrict_all",
70997+ .data = &grsec_enable_tpe_all,
70998+ .maxlen = sizeof(int),
70999+ .mode = 0600,
71000+ .proc_handler = &proc_dointvec,
71001+ },
71002+#endif
71003+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
71004+ {
71005+ .procname = "socket_all",
71006+ .data = &grsec_enable_socket_all,
71007+ .maxlen = sizeof(int),
71008+ .mode = 0600,
71009+ .proc_handler = &proc_dointvec,
71010+ },
71011+ {
71012+ .procname = "socket_all_gid",
71013+ .data = &grsec_socket_all_gid,
71014+ .maxlen = sizeof(int),
71015+ .mode = 0600,
71016+ .proc_handler = &proc_dointvec,
71017+ },
71018+#endif
71019+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
71020+ {
71021+ .procname = "socket_client",
71022+ .data = &grsec_enable_socket_client,
71023+ .maxlen = sizeof(int),
71024+ .mode = 0600,
71025+ .proc_handler = &proc_dointvec,
71026+ },
71027+ {
71028+ .procname = "socket_client_gid",
71029+ .data = &grsec_socket_client_gid,
71030+ .maxlen = sizeof(int),
71031+ .mode = 0600,
71032+ .proc_handler = &proc_dointvec,
71033+ },
71034+#endif
71035+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
71036+ {
71037+ .procname = "socket_server",
71038+ .data = &grsec_enable_socket_server,
71039+ .maxlen = sizeof(int),
71040+ .mode = 0600,
71041+ .proc_handler = &proc_dointvec,
71042+ },
71043+ {
71044+ .procname = "socket_server_gid",
71045+ .data = &grsec_socket_server_gid,
71046+ .maxlen = sizeof(int),
71047+ .mode = 0600,
71048+ .proc_handler = &proc_dointvec,
71049+ },
71050+#endif
71051+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
71052+ {
71053+ .procname = "audit_group",
71054+ .data = &grsec_enable_group,
71055+ .maxlen = sizeof(int),
71056+ .mode = 0600,
71057+ .proc_handler = &proc_dointvec,
71058+ },
71059+ {
71060+ .procname = "audit_gid",
71061+ .data = &grsec_audit_gid,
71062+ .maxlen = sizeof(int),
71063+ .mode = 0600,
71064+ .proc_handler = &proc_dointvec,
71065+ },
71066+#endif
71067+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
71068+ {
71069+ .procname = "audit_chdir",
71070+ .data = &grsec_enable_chdir,
71071+ .maxlen = sizeof(int),
71072+ .mode = 0600,
71073+ .proc_handler = &proc_dointvec,
71074+ },
71075+#endif
71076+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71077+ {
71078+ .procname = "audit_mount",
71079+ .data = &grsec_enable_mount,
71080+ .maxlen = sizeof(int),
71081+ .mode = 0600,
71082+ .proc_handler = &proc_dointvec,
71083+ },
71084+#endif
71085+#ifdef CONFIG_GRKERNSEC_DMESG
71086+ {
71087+ .procname = "dmesg",
71088+ .data = &grsec_enable_dmesg,
71089+ .maxlen = sizeof(int),
71090+ .mode = 0600,
71091+ .proc_handler = &proc_dointvec,
71092+ },
71093+#endif
71094+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
71095+ {
71096+ .procname = "chroot_findtask",
71097+ .data = &grsec_enable_chroot_findtask,
71098+ .maxlen = sizeof(int),
71099+ .mode = 0600,
71100+ .proc_handler = &proc_dointvec,
71101+ },
71102+#endif
71103+#ifdef CONFIG_GRKERNSEC_RESLOG
71104+ {
71105+ .procname = "resource_logging",
71106+ .data = &grsec_resource_logging,
71107+ .maxlen = sizeof(int),
71108+ .mode = 0600,
71109+ .proc_handler = &proc_dointvec,
71110+ },
71111+#endif
71112+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
71113+ {
71114+ .procname = "audit_ptrace",
71115+ .data = &grsec_enable_audit_ptrace,
71116+ .maxlen = sizeof(int),
71117+ .mode = 0600,
71118+ .proc_handler = &proc_dointvec,
71119+ },
71120+#endif
71121+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71122+ {
71123+ .procname = "harden_ptrace",
71124+ .data = &grsec_enable_harden_ptrace,
71125+ .maxlen = sizeof(int),
71126+ .mode = 0600,
71127+ .proc_handler = &proc_dointvec,
71128+ },
71129+#endif
71130+ {
71131+ .procname = "grsec_lock",
71132+ .data = &grsec_lock,
71133+ .maxlen = sizeof(int),
71134+ .mode = 0600,
71135+ .proc_handler = &proc_dointvec,
71136+ },
71137+#endif
71138+#ifdef CONFIG_GRKERNSEC_ROFS
71139+ {
71140+ .procname = "romount_protect",
71141+ .data = &grsec_enable_rofs,
71142+ .maxlen = sizeof(int),
71143+ .mode = 0600,
71144+ .proc_handler = &proc_dointvec_minmax,
71145+ .extra1 = &one,
71146+ .extra2 = &one,
71147+ },
71148+#endif
e2b79cd1
AF
71149+#ifdef CONFIG_GRKERNSEC_DENYUSB
71150+ {
71151+ .procname = "deny_new_usb",
71152+ .data = &grsec_deny_new_usb,
71153+ .maxlen = sizeof(int),
71154+ .mode = 0600,
71155+ .proc_handler = &proc_dointvec_minmax,
71156+ .extra1 = &one,
71157+ .extra2 = &one,
71158+ },
71159+#endif
bb5f0bf8
AF
71160+ { }
71161+};
71162+#endif
71163diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
71164new file mode 100644
71165index 0000000..0dc13c3
71166--- /dev/null
71167+++ b/grsecurity/grsec_time.c
71168@@ -0,0 +1,16 @@
71169+#include <linux/kernel.h>
71170+#include <linux/sched.h>
71171+#include <linux/grinternal.h>
71172+#include <linux/module.h>
71173+
71174+void
71175+gr_log_timechange(void)
71176+{
71177+#ifdef CONFIG_GRKERNSEC_TIME
71178+ if (grsec_enable_time)
71179+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
71180+#endif
71181+ return;
71182+}
71183+
71184+EXPORT_SYMBOL(gr_log_timechange);
71185diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
71186new file mode 100644
71187index 0000000..ee57dcf
71188--- /dev/null
71189+++ b/grsecurity/grsec_tpe.c
71190@@ -0,0 +1,73 @@
71191+#include <linux/kernel.h>
71192+#include <linux/sched.h>
71193+#include <linux/file.h>
71194+#include <linux/fs.h>
71195+#include <linux/grinternal.h>
71196+
71197+extern int gr_acl_tpe_check(void);
71198+
71199+int
71200+gr_tpe_allow(const struct file *file)
71201+{
71202+#ifdef CONFIG_GRKERNSEC
71203+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
71204+ const struct cred *cred = current_cred();
71205+ char *msg = NULL;
71206+ char *msg2 = NULL;
71207+
71208+ // never restrict root
71209+ if (gr_is_global_root(cred->uid))
71210+ return 1;
71211+
71212+ if (grsec_enable_tpe) {
71213+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
71214+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
71215+ msg = "not being in trusted group";
71216+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
71217+ msg = "being in untrusted group";
71218+#else
71219+ if (in_group_p(grsec_tpe_gid))
71220+ msg = "being in untrusted group";
71221+#endif
71222+ }
71223+ if (!msg && gr_acl_tpe_check())
71224+ msg = "being in untrusted role";
71225+
71226+ // not in any affected group/role
71227+ if (!msg)
71228+ goto next_check;
71229+
71230+ if (gr_is_global_nonroot(inode->i_uid))
71231+ msg2 = "file in non-root-owned directory";
71232+ else if (inode->i_mode & S_IWOTH)
71233+ msg2 = "file in world-writable directory";
71234+ else if (inode->i_mode & S_IWGRP)
71235+ msg2 = "file in group-writable directory";
71236+
71237+ if (msg && msg2) {
71238+ char fullmsg[70] = {0};
71239+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
71240+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
71241+ return 0;
71242+ }
71243+ msg = NULL;
71244+next_check:
71245+#ifdef CONFIG_GRKERNSEC_TPE_ALL
71246+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
71247+ return 1;
71248+
71249+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
71250+ msg = "directory not owned by user";
71251+ else if (inode->i_mode & S_IWOTH)
71252+ msg = "file in world-writable directory";
71253+ else if (inode->i_mode & S_IWGRP)
71254+ msg = "file in group-writable directory";
71255+
71256+ if (msg) {
71257+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
71258+ return 0;
71259+ }
71260+#endif
71261+#endif
71262+ return 1;
71263+}
e2b79cd1
AF
71264diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
71265new file mode 100644
71266index 0000000..ae02d8e
71267--- /dev/null
71268+++ b/grsecurity/grsec_usb.c
71269@@ -0,0 +1,15 @@
71270+#include <linux/kernel.h>
71271+#include <linux/grinternal.h>
71272+#include <linux/module.h>
71273+
71274+int gr_handle_new_usb(void)
71275+{
71276+#ifdef CONFIG_GRKERNSEC_DENYUSB
71277+ if (grsec_deny_new_usb) {
71278+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
71279+ return 1;
71280+ }
71281+#endif
71282+ return 0;
71283+}
71284+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
bb5f0bf8
AF
71285diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
71286new file mode 100644
71287index 0000000..9f7b1ac
71288--- /dev/null
71289+++ b/grsecurity/grsum.c
71290@@ -0,0 +1,61 @@
71291+#include <linux/err.h>
71292+#include <linux/kernel.h>
71293+#include <linux/sched.h>
71294+#include <linux/mm.h>
71295+#include <linux/scatterlist.h>
71296+#include <linux/crypto.h>
71297+#include <linux/gracl.h>
71298+
71299+
71300+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
71301+#error "crypto and sha256 must be built into the kernel"
71302+#endif
71303+
71304+int
71305+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
71306+{
71307+ char *p;
71308+ struct crypto_hash *tfm;
71309+ struct hash_desc desc;
71310+ struct scatterlist sg;
71311+ unsigned char temp_sum[GR_SHA_LEN];
71312+ volatile int retval = 0;
71313+ volatile int dummy = 0;
71314+ unsigned int i;
71315+
71316+ sg_init_table(&sg, 1);
71317+
71318+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
71319+ if (IS_ERR(tfm)) {
71320+ /* should never happen, since sha256 should be built in */
71321+ return 1;
71322+ }
71323+
71324+ desc.tfm = tfm;
71325+ desc.flags = 0;
71326+
71327+ crypto_hash_init(&desc);
71328+
71329+ p = salt;
71330+ sg_set_buf(&sg, p, GR_SALT_LEN);
71331+ crypto_hash_update(&desc, &sg, sg.length);
71332+
71333+ p = entry->pw;
71334+ sg_set_buf(&sg, p, strlen(p));
71335+
71336+ crypto_hash_update(&desc, &sg, sg.length);
71337+
71338+ crypto_hash_final(&desc, temp_sum);
71339+
71340+ memset(entry->pw, 0, GR_PW_LEN);
71341+
71342+ for (i = 0; i < GR_SHA_LEN; i++)
71343+ if (sum[i] != temp_sum[i])
71344+ retval = 1;
71345+ else
71346+ dummy = 1; // waste a cycle
71347+
71348+ crypto_free_hash(tfm);
71349+
71350+ return retval;
71351+}
71352diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
71353index 77ff547..181834f 100644
71354--- a/include/asm-generic/4level-fixup.h
71355+++ b/include/asm-generic/4level-fixup.h
71356@@ -13,8 +13,10 @@
71357 #define pmd_alloc(mm, pud, address) \
71358 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
71359 NULL: pmd_offset(pud, address))
71360+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
71361
71362 #define pud_alloc(mm, pgd, address) (pgd)
71363+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
71364 #define pud_offset(pgd, start) (pgd)
71365 #define pud_none(pud) 0
71366 #define pud_bad(pud) 0
71367diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
71368index b7babf0..04ad282 100644
71369--- a/include/asm-generic/atomic-long.h
71370+++ b/include/asm-generic/atomic-long.h
71371@@ -22,6 +22,12 @@
71372
71373 typedef atomic64_t atomic_long_t;
71374
71375+#ifdef CONFIG_PAX_REFCOUNT
71376+typedef atomic64_unchecked_t atomic_long_unchecked_t;
71377+#else
71378+typedef atomic64_t atomic_long_unchecked_t;
71379+#endif
71380+
71381 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
71382
71383 static inline long atomic_long_read(atomic_long_t *l)
71384@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
71385 return (long)atomic64_read(v);
71386 }
71387
71388+#ifdef CONFIG_PAX_REFCOUNT
71389+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
71390+{
71391+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71392+
71393+ return (long)atomic64_read_unchecked(v);
71394+}
71395+#endif
71396+
71397 static inline void atomic_long_set(atomic_long_t *l, long i)
71398 {
71399 atomic64_t *v = (atomic64_t *)l;
71400@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
71401 atomic64_set(v, i);
71402 }
71403
71404+#ifdef CONFIG_PAX_REFCOUNT
71405+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
71406+{
71407+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71408+
71409+ atomic64_set_unchecked(v, i);
71410+}
71411+#endif
71412+
71413 static inline void atomic_long_inc(atomic_long_t *l)
71414 {
71415 atomic64_t *v = (atomic64_t *)l;
71416@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
71417 atomic64_inc(v);
71418 }
71419
71420+#ifdef CONFIG_PAX_REFCOUNT
71421+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
71422+{
71423+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71424+
71425+ atomic64_inc_unchecked(v);
71426+}
71427+#endif
71428+
71429 static inline void atomic_long_dec(atomic_long_t *l)
71430 {
71431 atomic64_t *v = (atomic64_t *)l;
71432@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
71433 atomic64_dec(v);
71434 }
71435
71436+#ifdef CONFIG_PAX_REFCOUNT
71437+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
71438+{
71439+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71440+
71441+ atomic64_dec_unchecked(v);
71442+}
71443+#endif
71444+
71445 static inline void atomic_long_add(long i, atomic_long_t *l)
71446 {
71447 atomic64_t *v = (atomic64_t *)l;
71448@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
71449 atomic64_add(i, v);
71450 }
71451
71452+#ifdef CONFIG_PAX_REFCOUNT
71453+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
71454+{
71455+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71456+
71457+ atomic64_add_unchecked(i, v);
71458+}
71459+#endif
71460+
71461 static inline void atomic_long_sub(long i, atomic_long_t *l)
71462 {
71463 atomic64_t *v = (atomic64_t *)l;
71464@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
71465 atomic64_sub(i, v);
71466 }
71467
71468+#ifdef CONFIG_PAX_REFCOUNT
71469+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
71470+{
71471+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71472+
71473+ atomic64_sub_unchecked(i, v);
71474+}
71475+#endif
71476+
71477 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
71478 {
71479 atomic64_t *v = (atomic64_t *)l;
71480@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
71481 return (long)atomic64_add_return(i, v);
71482 }
71483
71484+#ifdef CONFIG_PAX_REFCOUNT
71485+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
71486+{
71487+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71488+
71489+ return (long)atomic64_add_return_unchecked(i, v);
71490+}
71491+#endif
71492+
71493 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
71494 {
71495 atomic64_t *v = (atomic64_t *)l;
71496@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
71497 return (long)atomic64_inc_return(v);
71498 }
71499
71500+#ifdef CONFIG_PAX_REFCOUNT
71501+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
71502+{
71503+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71504+
71505+ return (long)atomic64_inc_return_unchecked(v);
71506+}
71507+#endif
71508+
71509 static inline long atomic_long_dec_return(atomic_long_t *l)
71510 {
71511 atomic64_t *v = (atomic64_t *)l;
71512@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
71513
71514 typedef atomic_t atomic_long_t;
71515
71516+#ifdef CONFIG_PAX_REFCOUNT
71517+typedef atomic_unchecked_t atomic_long_unchecked_t;
71518+#else
71519+typedef atomic_t atomic_long_unchecked_t;
71520+#endif
71521+
71522 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
71523 static inline long atomic_long_read(atomic_long_t *l)
71524 {
71525@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
71526 return (long)atomic_read(v);
71527 }
71528
71529+#ifdef CONFIG_PAX_REFCOUNT
71530+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
71531+{
71532+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71533+
71534+ return (long)atomic_read_unchecked(v);
71535+}
71536+#endif
71537+
71538 static inline void atomic_long_set(atomic_long_t *l, long i)
71539 {
71540 atomic_t *v = (atomic_t *)l;
71541@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
71542 atomic_set(v, i);
71543 }
71544
71545+#ifdef CONFIG_PAX_REFCOUNT
71546+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
71547+{
71548+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71549+
71550+ atomic_set_unchecked(v, i);
71551+}
71552+#endif
71553+
71554 static inline void atomic_long_inc(atomic_long_t *l)
71555 {
71556 atomic_t *v = (atomic_t *)l;
71557@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
71558 atomic_inc(v);
71559 }
71560
71561+#ifdef CONFIG_PAX_REFCOUNT
71562+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
71563+{
71564+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71565+
71566+ atomic_inc_unchecked(v);
71567+}
71568+#endif
71569+
71570 static inline void atomic_long_dec(atomic_long_t *l)
71571 {
71572 atomic_t *v = (atomic_t *)l;
71573@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
71574 atomic_dec(v);
71575 }
71576
71577+#ifdef CONFIG_PAX_REFCOUNT
71578+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
71579+{
71580+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71581+
71582+ atomic_dec_unchecked(v);
71583+}
71584+#endif
71585+
71586 static inline void atomic_long_add(long i, atomic_long_t *l)
71587 {
71588 atomic_t *v = (atomic_t *)l;
71589@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
71590 atomic_add(i, v);
71591 }
71592
71593+#ifdef CONFIG_PAX_REFCOUNT
71594+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
71595+{
71596+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71597+
71598+ atomic_add_unchecked(i, v);
71599+}
71600+#endif
71601+
71602 static inline void atomic_long_sub(long i, atomic_long_t *l)
71603 {
71604 atomic_t *v = (atomic_t *)l;
71605@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
71606 atomic_sub(i, v);
71607 }
71608
71609+#ifdef CONFIG_PAX_REFCOUNT
71610+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
71611+{
71612+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71613+
71614+ atomic_sub_unchecked(i, v);
71615+}
71616+#endif
71617+
71618 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
71619 {
71620 atomic_t *v = (atomic_t *)l;
71621@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
71622 return (long)atomic_add_return(i, v);
71623 }
71624
71625+#ifdef CONFIG_PAX_REFCOUNT
71626+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
71627+{
71628+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71629+
71630+ return (long)atomic_add_return_unchecked(i, v);
71631+}
71632+
71633+#endif
71634+
71635 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
71636 {
71637 atomic_t *v = (atomic_t *)l;
71638@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
71639 return (long)atomic_inc_return(v);
71640 }
71641
71642+#ifdef CONFIG_PAX_REFCOUNT
71643+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
71644+{
71645+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71646+
71647+ return (long)atomic_inc_return_unchecked(v);
71648+}
71649+#endif
71650+
71651 static inline long atomic_long_dec_return(atomic_long_t *l)
71652 {
71653 atomic_t *v = (atomic_t *)l;
71654@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
71655
71656 #endif /* BITS_PER_LONG == 64 */
71657
71658+#ifdef CONFIG_PAX_REFCOUNT
71659+static inline void pax_refcount_needs_these_functions(void)
71660+{
71661+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
71662+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
71663+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
71664+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
71665+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
71666+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
71667+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
71668+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
71669+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
71670+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
71671+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
71672+#ifdef CONFIG_X86
71673+ atomic_clear_mask_unchecked(0, NULL);
71674+ atomic_set_mask_unchecked(0, NULL);
71675+#endif
71676+
71677+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
71678+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
71679+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
71680+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
71681+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
71682+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
71683+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
71684+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
71685+}
71686+#else
71687+#define atomic_read_unchecked(v) atomic_read(v)
71688+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
71689+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
71690+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
71691+#define atomic_inc_unchecked(v) atomic_inc(v)
71692+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
71693+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
71694+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
71695+#define atomic_dec_unchecked(v) atomic_dec(v)
71696+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
71697+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
71698+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
71699+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
71700+
71701+#define atomic_long_read_unchecked(v) atomic_long_read(v)
71702+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
71703+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
71704+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
71705+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
71706+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
71707+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
71708+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
71709+#endif
71710+
71711 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
71712diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
71713index 33bd2de..f31bff97 100644
71714--- a/include/asm-generic/atomic.h
71715+++ b/include/asm-generic/atomic.h
71716@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
71717 * Atomically clears the bits set in @mask from @v
71718 */
71719 #ifndef atomic_clear_mask
71720-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
71721+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
71722 {
71723 unsigned long flags;
71724
71725diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
71726index b18ce4f..2ee2843 100644
71727--- a/include/asm-generic/atomic64.h
71728+++ b/include/asm-generic/atomic64.h
71729@@ -16,6 +16,8 @@ typedef struct {
71730 long long counter;
71731 } atomic64_t;
71732
71733+typedef atomic64_t atomic64_unchecked_t;
71734+
71735 #define ATOMIC64_INIT(i) { (i) }
71736
71737 extern long long atomic64_read(const atomic64_t *v);
71738@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
71739 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
71740 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
71741
71742+#define atomic64_read_unchecked(v) atomic64_read(v)
71743+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
71744+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
71745+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
71746+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
71747+#define atomic64_inc_unchecked(v) atomic64_inc(v)
71748+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
71749+#define atomic64_dec_unchecked(v) atomic64_dec(v)
71750+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
71751+
71752 #endif /* _ASM_GENERIC_ATOMIC64_H */
71753diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
71754index 1bfcfe5..e04c5c9 100644
71755--- a/include/asm-generic/cache.h
71756+++ b/include/asm-generic/cache.h
71757@@ -6,7 +6,7 @@
71758 * cache lines need to provide their own cache.h.
71759 */
71760
71761-#define L1_CACHE_SHIFT 5
71762-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
71763+#define L1_CACHE_SHIFT 5UL
71764+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
71765
71766 #endif /* __ASM_GENERIC_CACHE_H */
71767diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
71768index 0d68a1e..b74a761 100644
71769--- a/include/asm-generic/emergency-restart.h
71770+++ b/include/asm-generic/emergency-restart.h
71771@@ -1,7 +1,7 @@
71772 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
71773 #define _ASM_GENERIC_EMERGENCY_RESTART_H
71774
71775-static inline void machine_emergency_restart(void)
71776+static inline __noreturn void machine_emergency_restart(void)
71777 {
71778 machine_restart(NULL);
71779 }
71780diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
71781index 90f99c7..00ce236 100644
71782--- a/include/asm-generic/kmap_types.h
71783+++ b/include/asm-generic/kmap_types.h
71784@@ -2,9 +2,9 @@
71785 #define _ASM_GENERIC_KMAP_TYPES_H
71786
71787 #ifdef __WITH_KM_FENCE
71788-# define KM_TYPE_NR 41
71789+# define KM_TYPE_NR 42
71790 #else
71791-# define KM_TYPE_NR 20
71792+# define KM_TYPE_NR 21
71793 #endif
71794
71795 #endif
71796diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
71797index 9ceb03b..62b0b8f 100644
71798--- a/include/asm-generic/local.h
71799+++ b/include/asm-generic/local.h
71800@@ -23,24 +23,37 @@ typedef struct
71801 atomic_long_t a;
71802 } local_t;
71803
71804+typedef struct {
71805+ atomic_long_unchecked_t a;
71806+} local_unchecked_t;
71807+
71808 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
71809
71810 #define local_read(l) atomic_long_read(&(l)->a)
71811+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
71812 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
71813+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
71814 #define local_inc(l) atomic_long_inc(&(l)->a)
71815+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
71816 #define local_dec(l) atomic_long_dec(&(l)->a)
71817+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
71818 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
71819+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
71820 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
71821+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
71822
71823 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
71824 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
71825 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
71826 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
71827 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
71828+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
71829 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
71830 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
71831+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
71832
71833 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
71834+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
71835 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
71836 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
71837 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
71838diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
71839index 725612b..9cc513a 100644
71840--- a/include/asm-generic/pgtable-nopmd.h
71841+++ b/include/asm-generic/pgtable-nopmd.h
71842@@ -1,14 +1,19 @@
71843 #ifndef _PGTABLE_NOPMD_H
71844 #define _PGTABLE_NOPMD_H
71845
71846-#ifndef __ASSEMBLY__
71847-
71848 #include <asm-generic/pgtable-nopud.h>
71849
71850-struct mm_struct;
71851-
71852 #define __PAGETABLE_PMD_FOLDED
71853
71854+#define PMD_SHIFT PUD_SHIFT
71855+#define PTRS_PER_PMD 1
71856+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
71857+#define PMD_MASK (~(PMD_SIZE-1))
71858+
71859+#ifndef __ASSEMBLY__
71860+
71861+struct mm_struct;
71862+
71863 /*
71864 * Having the pmd type consist of a pud gets the size right, and allows
71865 * us to conceptually access the pud entry that this pmd is folded into
71866@@ -16,11 +21,6 @@ struct mm_struct;
71867 */
71868 typedef struct { pud_t pud; } pmd_t;
71869
71870-#define PMD_SHIFT PUD_SHIFT
71871-#define PTRS_PER_PMD 1
71872-#define PMD_SIZE (1UL << PMD_SHIFT)
71873-#define PMD_MASK (~(PMD_SIZE-1))
71874-
71875 /*
71876 * The "pud_xxx()" functions here are trivial for a folded two-level
71877 * setup: the pmd is never bad, and a pmd always exists (as it's folded
71878diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
71879index 810431d..0ec4804f 100644
71880--- a/include/asm-generic/pgtable-nopud.h
71881+++ b/include/asm-generic/pgtable-nopud.h
71882@@ -1,10 +1,15 @@
71883 #ifndef _PGTABLE_NOPUD_H
71884 #define _PGTABLE_NOPUD_H
71885
71886-#ifndef __ASSEMBLY__
71887-
71888 #define __PAGETABLE_PUD_FOLDED
71889
71890+#define PUD_SHIFT PGDIR_SHIFT
71891+#define PTRS_PER_PUD 1
71892+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
71893+#define PUD_MASK (~(PUD_SIZE-1))
71894+
71895+#ifndef __ASSEMBLY__
71896+
71897 /*
71898 * Having the pud type consist of a pgd gets the size right, and allows
71899 * us to conceptually access the pgd entry that this pud is folded into
71900@@ -12,11 +17,6 @@
71901 */
71902 typedef struct { pgd_t pgd; } pud_t;
71903
71904-#define PUD_SHIFT PGDIR_SHIFT
71905-#define PTRS_PER_PUD 1
71906-#define PUD_SIZE (1UL << PUD_SHIFT)
71907-#define PUD_MASK (~(PUD_SIZE-1))
71908-
71909 /*
71910 * The "pgd_xxx()" functions here are trivial for a folded two-level
71911 * setup: the pud is never bad, and a pud always exists (as it's folded
71912@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
71913 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
71914
71915 #define pgd_populate(mm, pgd, pud) do { } while (0)
71916+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
71917 /*
71918 * (puds are folded into pgds so this doesn't get actually called,
71919 * but the define is needed for a generic inline function.)
71920diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
71921index a59ff51..2594a70 100644
71922--- a/include/asm-generic/pgtable.h
71923+++ b/include/asm-generic/pgtable.h
71924@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
71925 }
71926 #endif /* CONFIG_NUMA_BALANCING */
71927
71928+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
71929+static inline unsigned long pax_open_kernel(void) { return 0; }
71930+#endif
71931+
71932+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
71933+static inline unsigned long pax_close_kernel(void) { return 0; }
71934+#endif
71935+
71936 #endif /* CONFIG_MMU */
71937
71938 #endif /* !__ASSEMBLY__ */
71939diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
71940index c184aa8..d049942 100644
71941--- a/include/asm-generic/uaccess.h
71942+++ b/include/asm-generic/uaccess.h
71943@@ -343,4 +343,12 @@ clear_user(void __user *to, unsigned long n)
71944 return __clear_user(to, n);
71945 }
71946
71947+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
71948+//static inline unsigned long pax_open_userland(void) { return 0; }
71949+#endif
71950+
71951+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
71952+//static inline unsigned long pax_close_userland(void) { return 0; }
71953+#endif
71954+
71955 #endif /* __ASM_GENERIC_UACCESS_H */
71956diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
71957index eb58d2d..df131bf 100644
71958--- a/include/asm-generic/vmlinux.lds.h
71959+++ b/include/asm-generic/vmlinux.lds.h
71960@@ -239,6 +239,7 @@
71961 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
71962 VMLINUX_SYMBOL(__start_rodata) = .; \
71963 *(.rodata) *(.rodata.*) \
71964+ *(.data..read_only) \
71965 *(__vermagic) /* Kernel version magic */ \
71966 . = ALIGN(8); \
71967 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
71968@@ -749,17 +750,18 @@
71969 * section in the linker script will go there too. @phdr should have
71970 * a leading colon.
71971 *
71972- * Note that this macros defines __per_cpu_load as an absolute symbol.
71973+ * Note that this macros defines per_cpu_load as an absolute symbol.
71974 * If there is no need to put the percpu section at a predetermined
71975 * address, use PERCPU_SECTION.
71976 */
71977 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
71978- VMLINUX_SYMBOL(__per_cpu_load) = .; \
71979- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
71980+ per_cpu_load = .; \
71981+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
71982 - LOAD_OFFSET) { \
71983+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
71984 PERCPU_INPUT(cacheline) \
71985 } phdr \
71986- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
71987+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
71988
71989 /**
71990 * PERCPU_SECTION - define output section for percpu area, simple version
71991diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
71992index 418d270..bfd2794 100644
71993--- a/include/crypto/algapi.h
71994+++ b/include/crypto/algapi.h
71995@@ -34,7 +34,7 @@ struct crypto_type {
71996 unsigned int maskclear;
71997 unsigned int maskset;
71998 unsigned int tfmsize;
71999-};
72000+} __do_const;
72001
72002 struct crypto_instance {
72003 struct crypto_alg alg;
72004diff --git a/include/drm/drmP.h b/include/drm/drmP.h
72005index 63d17ee..716de2b 100644
72006--- a/include/drm/drmP.h
72007+++ b/include/drm/drmP.h
72008@@ -72,6 +72,7 @@
72009 #include <linux/workqueue.h>
72010 #include <linux/poll.h>
72011 #include <asm/pgalloc.h>
72012+#include <asm/local.h>
72013 #include <drm/drm.h>
72014 #include <drm/drm_sarea.h>
72015
72016@@ -296,10 +297,12 @@ do { \
72017 * \param cmd command.
72018 * \param arg argument.
72019 */
72020-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
72021+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
72022+ struct drm_file *file_priv);
72023+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
72024 struct drm_file *file_priv);
72025
72026-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
72027+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
72028 unsigned long arg);
72029
72030 #define DRM_IOCTL_NR(n) _IOC_NR(n)
72031@@ -314,10 +317,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
72032 struct drm_ioctl_desc {
72033 unsigned int cmd;
72034 int flags;
72035- drm_ioctl_t *func;
72036+ drm_ioctl_t func;
72037 unsigned int cmd_drv;
72038 const char *name;
72039-};
72040+} __do_const;
72041
72042 /**
72043 * Creates a driver or general drm_ioctl_desc array entry for the given
72044@@ -1015,7 +1018,7 @@ struct drm_info_list {
72045 int (*show)(struct seq_file*, void*); /** show callback */
72046 u32 driver_features; /**< Required driver features for this entry */
72047 void *data;
72048-};
72049+} __do_const;
72050
72051 /**
72052 * debugfs node structure. This structure represents a debugfs file.
72053@@ -1088,7 +1091,7 @@ struct drm_device {
72054
72055 /** \name Usage Counters */
72056 /*@{ */
72057- int open_count; /**< Outstanding files open */
72058+ local_t open_count; /**< Outstanding files open */
72059 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
72060 atomic_t vma_count; /**< Outstanding vma areas open */
72061 int buf_use; /**< Buffers in use -- cannot alloc */
72062@@ -1099,7 +1102,7 @@ struct drm_device {
72063 /*@{ */
72064 unsigned long counters;
72065 enum drm_stat_type types[15];
72066- atomic_t counts[15];
72067+ atomic_unchecked_t counts[15];
72068 /*@} */
72069
72070 struct list_head filelist;
72071diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
72072index f43d556..94d9343 100644
72073--- a/include/drm/drm_crtc_helper.h
72074+++ b/include/drm/drm_crtc_helper.h
72075@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
72076 struct drm_connector *connector);
72077 /* disable encoder when not in use - more explicit than dpms off */
72078 void (*disable)(struct drm_encoder *encoder);
72079-};
72080+} __no_const;
72081
72082 /**
72083 * drm_connector_helper_funcs - helper operations for connectors
72084diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
72085index 72dcbe8..8db58d7 100644
72086--- a/include/drm/ttm/ttm_memory.h
72087+++ b/include/drm/ttm/ttm_memory.h
72088@@ -48,7 +48,7 @@
72089
72090 struct ttm_mem_shrink {
72091 int (*do_shrink) (struct ttm_mem_shrink *);
72092-};
72093+} __no_const;
72094
72095 /**
72096 * struct ttm_mem_global - Global memory accounting structure.
72097diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
72098index 4b840e8..155d235 100644
72099--- a/include/keys/asymmetric-subtype.h
72100+++ b/include/keys/asymmetric-subtype.h
72101@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
72102 /* Verify the signature on a key of this subtype (optional) */
72103 int (*verify_signature)(const struct key *key,
72104 const struct public_key_signature *sig);
72105-};
72106+} __do_const;
72107
72108 /**
72109 * asymmetric_key_subtype - Get the subtype from an asymmetric key
72110diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
72111index c1da539..1dcec55 100644
72112--- a/include/linux/atmdev.h
72113+++ b/include/linux/atmdev.h
72114@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
72115 #endif
72116
72117 struct k_atm_aal_stats {
72118-#define __HANDLE_ITEM(i) atomic_t i
72119+#define __HANDLE_ITEM(i) atomic_unchecked_t i
72120 __AAL_STAT_ITEMS
72121 #undef __HANDLE_ITEM
72122 };
72123@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
72124 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
72125 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
72126 struct module *owner;
72127-};
72128+} __do_const ;
72129
72130 struct atmphy_ops {
72131 int (*start)(struct atm_dev *dev);
72132diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
72133index 70cf138..0418ee2 100644
72134--- a/include/linux/binfmts.h
72135+++ b/include/linux/binfmts.h
72136@@ -73,8 +73,10 @@ struct linux_binfmt {
72137 int (*load_binary)(struct linux_binprm *);
72138 int (*load_shlib)(struct file *);
72139 int (*core_dump)(struct coredump_params *cprm);
72140+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
72141+ void (*handle_mmap)(struct file *);
72142 unsigned long min_coredump; /* minimal dump size */
72143-};
72144+} __do_const;
72145
72146 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
72147
72148diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
72149index 2fdb4a4..54aad7e 100644
72150--- a/include/linux/blkdev.h
72151+++ b/include/linux/blkdev.h
72152@@ -1526,7 +1526,7 @@ struct block_device_operations {
72153 /* this callback is with swap_lock and sometimes page table lock held */
72154 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
72155 struct module *owner;
72156-};
72157+} __do_const;
72158
72159 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
72160 unsigned long);
72161diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
72162index 7c2e030..b72475d 100644
72163--- a/include/linux/blktrace_api.h
72164+++ b/include/linux/blktrace_api.h
72165@@ -23,7 +23,7 @@ struct blk_trace {
72166 struct dentry *dir;
72167 struct dentry *dropped_file;
72168 struct dentry *msg_file;
72169- atomic_t dropped;
72170+ atomic_unchecked_t dropped;
72171 };
72172
72173 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
72174diff --git a/include/linux/cache.h b/include/linux/cache.h
72175index 4c57065..4307975 100644
72176--- a/include/linux/cache.h
72177+++ b/include/linux/cache.h
72178@@ -16,6 +16,10 @@
72179 #define __read_mostly
72180 #endif
72181
72182+#ifndef __read_only
72183+#define __read_only __read_mostly
72184+#endif
72185+
72186 #ifndef ____cacheline_aligned
72187 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
72188 #endif
72189diff --git a/include/linux/capability.h b/include/linux/capability.h
72190index d9a4f7f4..19f77d6 100644
72191--- a/include/linux/capability.h
72192+++ b/include/linux/capability.h
72193@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
72194 extern bool nsown_capable(int cap);
72195 extern bool inode_capable(const struct inode *inode, int cap);
72196 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
72197+extern bool capable_nolog(int cap);
72198+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
72199+extern bool inode_capable_nolog(const struct inode *inode, int cap);
72200
72201 /* audit system wants to get cap info from files as well */
72202 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
72203
72204+extern int is_privileged_binary(const struct dentry *dentry);
72205+
72206 #endif /* !_LINUX_CAPABILITY_H */
72207diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
72208index 8609d57..86e4d79 100644
72209--- a/include/linux/cdrom.h
72210+++ b/include/linux/cdrom.h
72211@@ -87,7 +87,6 @@ struct cdrom_device_ops {
72212
72213 /* driver specifications */
72214 const int capability; /* capability flags */
72215- int n_minors; /* number of active minor devices */
72216 /* handle uniform packets for scsi type devices (scsi,atapi) */
72217 int (*generic_packet) (struct cdrom_device_info *,
72218 struct packet_command *);
72219diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
72220index 4ce9056..86caac6 100644
72221--- a/include/linux/cleancache.h
72222+++ b/include/linux/cleancache.h
72223@@ -31,7 +31,7 @@ struct cleancache_ops {
72224 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
72225 void (*invalidate_inode)(int, struct cleancache_filekey);
72226 void (*invalidate_fs)(int);
72227-};
72228+} __no_const;
72229
72230 extern struct cleancache_ops *
72231 cleancache_register_ops(struct cleancache_ops *ops);
72232diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
72233index 1186098..f87e53d 100644
72234--- a/include/linux/clk-provider.h
72235+++ b/include/linux/clk-provider.h
72236@@ -132,6 +132,7 @@ struct clk_ops {
72237 unsigned long);
72238 void (*init)(struct clk_hw *hw);
72239 };
72240+typedef struct clk_ops __no_const clk_ops_no_const;
72241
72242 /**
72243 * struct clk_init_data - holds init data that's common to all clocks and is
72244diff --git a/include/linux/compat.h b/include/linux/compat.h
72245index 7f0c1dd..206ac34 100644
72246--- a/include/linux/compat.h
72247+++ b/include/linux/compat.h
72248@@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72249 compat_size_t __user *len_ptr);
72250
72251 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
72252-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
72253+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
72254 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
72255 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
72256 compat_ssize_t msgsz, int msgflg);
72257@@ -419,7 +419,7 @@ extern int compat_ptrace_request(struct task_struct *child,
72258 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
72259 compat_ulong_t addr, compat_ulong_t data);
72260 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
72261- compat_long_t addr, compat_long_t data);
72262+ compat_ulong_t addr, compat_ulong_t data);
72263
72264 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
72265 /*
72266@@ -669,6 +669,7 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
72267
72268 int compat_restore_altstack(const compat_stack_t __user *uss);
72269 int __compat_save_altstack(compat_stack_t __user *, unsigned long);
72270+void __compat_save_altstack_ex(compat_stack_t __user *, unsigned long);
72271
72272 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
72273 struct compat_timespec __user *interval);
72274diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
72275index 842de22..7f3a41f 100644
72276--- a/include/linux/compiler-gcc4.h
72277+++ b/include/linux/compiler-gcc4.h
72278@@ -39,9 +39,29 @@
72279 # define __compiletime_warning(message) __attribute__((warning(message)))
72280 # define __compiletime_error(message) __attribute__((error(message)))
72281 #endif /* __CHECKER__ */
72282+
72283+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
72284+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
72285+#define __bos0(ptr) __bos((ptr), 0)
72286+#define __bos1(ptr) __bos((ptr), 1)
72287 #endif /* GCC_VERSION >= 40300 */
72288
72289 #if GCC_VERSION >= 40500
72290+
72291+#ifdef CONSTIFY_PLUGIN
72292+#define __no_const __attribute__((no_const))
72293+#define __do_const __attribute__((do_const))
72294+#endif
72295+
72296+#ifdef SIZE_OVERFLOW_PLUGIN
72297+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
72298+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
72299+#endif
72300+
72301+#ifdef LATENT_ENTROPY_PLUGIN
72302+#define __latent_entropy __attribute__((latent_entropy))
72303+#endif
72304+
72305 /*
72306 * Mark a position in code as unreachable. This can be used to
72307 * suppress control flow warnings after asm blocks that transfer
72308diff --git a/include/linux/compiler.h b/include/linux/compiler.h
72309index 92669cd..1771a15 100644
72310--- a/include/linux/compiler.h
72311+++ b/include/linux/compiler.h
72312@@ -5,11 +5,14 @@
72313
72314 #ifdef __CHECKER__
72315 # define __user __attribute__((noderef, address_space(1)))
72316+# define __force_user __force __user
72317 # define __kernel __attribute__((address_space(0)))
72318+# define __force_kernel __force __kernel
72319 # define __safe __attribute__((safe))
72320 # define __force __attribute__((force))
72321 # define __nocast __attribute__((nocast))
72322 # define __iomem __attribute__((noderef, address_space(2)))
72323+# define __force_iomem __force __iomem
72324 # define __must_hold(x) __attribute__((context(x,1,1)))
72325 # define __acquires(x) __attribute__((context(x,0,1)))
72326 # define __releases(x) __attribute__((context(x,1,0)))
72327@@ -17,20 +20,37 @@
72328 # define __release(x) __context__(x,-1)
72329 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
72330 # define __percpu __attribute__((noderef, address_space(3)))
72331+# define __force_percpu __force __percpu
72332 #ifdef CONFIG_SPARSE_RCU_POINTER
72333 # define __rcu __attribute__((noderef, address_space(4)))
72334+# define __force_rcu __force __rcu
72335 #else
72336 # define __rcu
72337+# define __force_rcu
72338 #endif
72339 extern void __chk_user_ptr(const volatile void __user *);
72340 extern void __chk_io_ptr(const volatile void __iomem *);
72341 #else
72342-# define __user
72343-# define __kernel
72344+# ifdef CHECKER_PLUGIN
72345+//# define __user
72346+//# define __force_user
72347+//# define __kernel
72348+//# define __force_kernel
72349+# else
72350+# ifdef STRUCTLEAK_PLUGIN
72351+# define __user __attribute__((user))
72352+# else
72353+# define __user
72354+# endif
72355+# define __force_user
72356+# define __kernel
72357+# define __force_kernel
72358+# endif
72359 # define __safe
72360 # define __force
72361 # define __nocast
72362 # define __iomem
72363+# define __force_iomem
72364 # define __chk_user_ptr(x) (void)0
72365 # define __chk_io_ptr(x) (void)0
72366 # define __builtin_warning(x, y...) (1)
72367@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
72368 # define __release(x) (void)0
72369 # define __cond_lock(x,c) (c)
72370 # define __percpu
72371+# define __force_percpu
72372 # define __rcu
72373+# define __force_rcu
72374 #endif
72375
72376 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
72377@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72378 # define __attribute_const__ /* unimplemented */
72379 #endif
72380
72381+#ifndef __no_const
72382+# define __no_const
72383+#endif
72384+
72385+#ifndef __do_const
72386+# define __do_const
72387+#endif
72388+
72389+#ifndef __size_overflow
72390+# define __size_overflow(...)
72391+#endif
72392+
72393+#ifndef __intentional_overflow
72394+# define __intentional_overflow(...)
72395+#endif
72396+
72397+#ifndef __latent_entropy
72398+# define __latent_entropy
72399+#endif
72400+
72401 /*
72402 * Tell gcc if a function is cold. The compiler will assume any path
72403 * directly leading to the call is unlikely.
72404@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72405 #define __cold
72406 #endif
72407
72408+#ifndef __alloc_size
72409+#define __alloc_size(...)
72410+#endif
72411+
72412+#ifndef __bos
72413+#define __bos(ptr, arg)
72414+#endif
72415+
72416+#ifndef __bos0
72417+#define __bos0(ptr)
72418+#endif
72419+
72420+#ifndef __bos1
72421+#define __bos1(ptr)
72422+#endif
72423+
72424 /* Simple shorthand for a section definition */
72425 #ifndef __section
72426 # define __section(S) __attribute__ ((__section__(#S)))
72427@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72428 * use is to mediate communication between process-level code and irq/NMI
72429 * handlers, all running on the same CPU.
72430 */
72431-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
72432+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
72433+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
72434
72435 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
72436 #ifdef CONFIG_KPROBES
72437diff --git a/include/linux/completion.h b/include/linux/completion.h
72438index 33f0280..35c6568 100644
72439--- a/include/linux/completion.h
72440+++ b/include/linux/completion.h
72441@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
72442 extern void wait_for_completion(struct completion *);
72443 extern void wait_for_completion_io(struct completion *);
72444 extern int wait_for_completion_interruptible(struct completion *x);
72445-extern int wait_for_completion_killable(struct completion *x);
72446+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
72447 extern unsigned long wait_for_completion_timeout(struct completion *x,
72448 unsigned long timeout);
72449 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
72450 unsigned long timeout);
72451 extern long wait_for_completion_interruptible_timeout(
72452- struct completion *x, unsigned long timeout);
72453+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
72454 extern long wait_for_completion_killable_timeout(
72455- struct completion *x, unsigned long timeout);
72456+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
72457 extern bool try_wait_for_completion(struct completion *x);
72458 extern bool completion_done(struct completion *x);
72459
72460diff --git a/include/linux/configfs.h b/include/linux/configfs.h
72461index 34025df..d94bbbc 100644
72462--- a/include/linux/configfs.h
72463+++ b/include/linux/configfs.h
72464@@ -125,7 +125,7 @@ struct configfs_attribute {
72465 const char *ca_name;
72466 struct module *ca_owner;
72467 umode_t ca_mode;
72468-};
72469+} __do_const;
72470
72471 /*
72472 * Users often need to create attribute structures for their configurable
72473diff --git a/include/linux/cpu.h b/include/linux/cpu.h
72474index 9f3c7e8..a18c7b6 100644
72475--- a/include/linux/cpu.h
72476+++ b/include/linux/cpu.h
72477@@ -115,7 +115,7 @@ enum {
72478 /* Need to know about CPUs going up/down? */
72479 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
72480 #define cpu_notifier(fn, pri) { \
72481- static struct notifier_block fn##_nb __cpuinitdata = \
72482+ static struct notifier_block fn##_nb = \
72483 { .notifier_call = fn, .priority = pri }; \
72484 register_cpu_notifier(&fn##_nb); \
72485 }
72486diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
72487index 037d36a..ca5fe6e 100644
72488--- a/include/linux/cpufreq.h
72489+++ b/include/linux/cpufreq.h
72490@@ -262,7 +262,7 @@ struct cpufreq_driver {
72491 int (*suspend) (struct cpufreq_policy *policy);
72492 int (*resume) (struct cpufreq_policy *policy);
72493 struct freq_attr **attr;
72494-};
72495+} __do_const;
72496
72497 /* flags */
72498
72499@@ -321,6 +321,7 @@ struct global_attr {
72500 ssize_t (*store)(struct kobject *a, struct attribute *b,
72501 const char *c, size_t count);
72502 };
72503+typedef struct global_attr __no_const global_attr_no_const;
72504
72505 #define define_one_global_ro(_name) \
72506 static struct global_attr _name = \
72507diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
72508index 8f04062..900239a 100644
72509--- a/include/linux/cpuidle.h
72510+++ b/include/linux/cpuidle.h
72511@@ -52,7 +52,8 @@ struct cpuidle_state {
72512 int index);
72513
72514 int (*enter_dead) (struct cpuidle_device *dev, int index);
72515-};
72516+} __do_const;
72517+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
72518
72519 /* Idle State Flags */
72520 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
72521@@ -191,7 +192,7 @@ struct cpuidle_governor {
72522 void (*reflect) (struct cpuidle_device *dev, int index);
72523
72524 struct module *owner;
72525-};
72526+} __do_const;
72527
72528 #ifdef CONFIG_CPU_IDLE
72529
72530diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
72531index d08e4d2..95fad61 100644
72532--- a/include/linux/cpumask.h
72533+++ b/include/linux/cpumask.h
72534@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
72535 }
72536
72537 /* Valid inputs for n are -1 and 0. */
72538-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72539+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
72540 {
72541 return n+1;
72542 }
72543
72544-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72545+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
72546 {
72547 return n+1;
72548 }
72549
72550-static inline unsigned int cpumask_next_and(int n,
72551+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
72552 const struct cpumask *srcp,
72553 const struct cpumask *andp)
72554 {
72555@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
72556 *
72557 * Returns >= nr_cpu_ids if no further cpus set.
72558 */
72559-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72560+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
72561 {
72562 /* -1 is a legal arg here. */
72563 if (n != -1)
72564@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72565 *
72566 * Returns >= nr_cpu_ids if no further cpus unset.
72567 */
72568-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72569+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
72570 {
72571 /* -1 is a legal arg here. */
72572 if (n != -1)
72573@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72574 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
72575 }
72576
72577-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
72578+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
72579 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
72580
72581 /**
72582diff --git a/include/linux/cred.h b/include/linux/cred.h
72583index 04421e8..6bce4ef 100644
72584--- a/include/linux/cred.h
72585+++ b/include/linux/cred.h
72586@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
72587 static inline void validate_process_creds(void)
72588 {
72589 }
72590+static inline void validate_task_creds(struct task_struct *task)
72591+{
72592+}
72593 #endif
72594
72595 /**
72596diff --git a/include/linux/crypto.h b/include/linux/crypto.h
72597index b92eadf..b4ecdc1 100644
72598--- a/include/linux/crypto.h
72599+++ b/include/linux/crypto.h
72600@@ -373,7 +373,7 @@ struct cipher_tfm {
72601 const u8 *key, unsigned int keylen);
72602 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
72603 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
72604-};
72605+} __no_const;
72606
72607 struct hash_tfm {
72608 int (*init)(struct hash_desc *desc);
72609@@ -394,13 +394,13 @@ struct compress_tfm {
72610 int (*cot_decompress)(struct crypto_tfm *tfm,
72611 const u8 *src, unsigned int slen,
72612 u8 *dst, unsigned int *dlen);
72613-};
72614+} __no_const;
72615
72616 struct rng_tfm {
72617 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
72618 unsigned int dlen);
72619 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
72620-};
72621+} __no_const;
72622
72623 #define crt_ablkcipher crt_u.ablkcipher
72624 #define crt_aead crt_u.aead
72625diff --git a/include/linux/ctype.h b/include/linux/ctype.h
72626index 653589e..4ef254a 100644
72627--- a/include/linux/ctype.h
72628+++ b/include/linux/ctype.h
72629@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
72630 * Fast implementation of tolower() for internal usage. Do not use in your
72631 * code.
72632 */
72633-static inline char _tolower(const char c)
72634+static inline unsigned char _tolower(const unsigned char c)
72635 {
72636 return c | 0x20;
72637 }
72638diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
72639index 7925bf0..d5143d2 100644
72640--- a/include/linux/decompress/mm.h
72641+++ b/include/linux/decompress/mm.h
72642@@ -77,7 +77,7 @@ static void free(void *where)
72643 * warnings when not needed (indeed large_malloc / large_free are not
72644 * needed by inflate */
72645
72646-#define malloc(a) kmalloc(a, GFP_KERNEL)
72647+#define malloc(a) kmalloc((a), GFP_KERNEL)
72648 #define free(a) kfree(a)
72649
72650 #define large_malloc(a) vmalloc(a)
72651diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
72652index fe8c447..bdc1f33 100644
72653--- a/include/linux/devfreq.h
72654+++ b/include/linux/devfreq.h
72655@@ -114,7 +114,7 @@ struct devfreq_governor {
72656 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
72657 int (*event_handler)(struct devfreq *devfreq,
72658 unsigned int event, void *data);
72659-};
72660+} __do_const;
72661
72662 /**
72663 * struct devfreq - Device devfreq structure
72664diff --git a/include/linux/device.h b/include/linux/device.h
72665index c0a1261..dba7569 100644
72666--- a/include/linux/device.h
72667+++ b/include/linux/device.h
72668@@ -290,7 +290,7 @@ struct subsys_interface {
72669 struct list_head node;
72670 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
72671 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
72672-};
72673+} __do_const;
72674
72675 int subsys_interface_register(struct subsys_interface *sif);
72676 void subsys_interface_unregister(struct subsys_interface *sif);
72677@@ -473,7 +473,7 @@ struct device_type {
72678 void (*release)(struct device *dev);
72679
72680 const struct dev_pm_ops *pm;
72681-};
72682+} __do_const;
72683
72684 /* interface for exporting device attributes */
72685 struct device_attribute {
72686@@ -483,11 +483,12 @@ struct device_attribute {
72687 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
72688 const char *buf, size_t count);
72689 };
72690+typedef struct device_attribute __no_const device_attribute_no_const;
72691
72692 struct dev_ext_attribute {
72693 struct device_attribute attr;
72694 void *var;
72695-};
72696+} __do_const;
72697
72698 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
72699 char *buf);
72700diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
72701index 94af418..b1ca7a2 100644
72702--- a/include/linux/dma-mapping.h
72703+++ b/include/linux/dma-mapping.h
72704@@ -54,7 +54,7 @@ struct dma_map_ops {
72705 u64 (*get_required_mask)(struct device *dev);
72706 #endif
72707 int is_phys;
72708-};
72709+} __do_const;
72710
72711 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
72712
72713diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
72714index 96d3e4a..dc36433 100644
72715--- a/include/linux/dmaengine.h
72716+++ b/include/linux/dmaengine.h
72717@@ -1035,9 +1035,9 @@ struct dma_pinned_list {
72718 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
72719 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
72720
72721-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
72722+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
72723 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
72724-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
72725+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
72726 struct dma_pinned_list *pinned_list, struct page *page,
72727 unsigned int offset, size_t len);
72728
72729diff --git a/include/linux/efi.h b/include/linux/efi.h
72730index 2bc0ad7..3f7b006 100644
72731--- a/include/linux/efi.h
72732+++ b/include/linux/efi.h
72733@@ -745,6 +745,7 @@ struct efivar_operations {
72734 efi_set_variable_t *set_variable;
72735 efi_query_variable_store_t *query_variable_store;
72736 };
72737+typedef struct efivar_operations __no_const efivar_operations_no_const;
72738
72739 struct efivars {
72740 /*
72741diff --git a/include/linux/elf.h b/include/linux/elf.h
72742index 40a3c0e..4c45a38 100644
72743--- a/include/linux/elf.h
72744+++ b/include/linux/elf.h
72745@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
72746 #define elf_note elf32_note
72747 #define elf_addr_t Elf32_Off
72748 #define Elf_Half Elf32_Half
72749+#define elf_dyn Elf32_Dyn
72750
72751 #else
72752
72753@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
72754 #define elf_note elf64_note
72755 #define elf_addr_t Elf64_Off
72756 #define Elf_Half Elf64_Half
72757+#define elf_dyn Elf64_Dyn
72758
72759 #endif
72760
72761diff --git a/include/linux/err.h b/include/linux/err.h
72762index f2edce2..cc2082c 100644
72763--- a/include/linux/err.h
72764+++ b/include/linux/err.h
72765@@ -19,12 +19,12 @@
72766
72767 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
72768
72769-static inline void * __must_check ERR_PTR(long error)
72770+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
72771 {
72772 return (void *) error;
72773 }
72774
72775-static inline long __must_check PTR_ERR(const void *ptr)
72776+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
72777 {
72778 return (long) ptr;
72779 }
72780diff --git a/include/linux/extcon.h b/include/linux/extcon.h
72781index fcb51c8..bdafcf6 100644
72782--- a/include/linux/extcon.h
72783+++ b/include/linux/extcon.h
72784@@ -134,7 +134,7 @@ struct extcon_dev {
72785 /* /sys/class/extcon/.../mutually_exclusive/... */
72786 struct attribute_group attr_g_muex;
72787 struct attribute **attrs_muex;
72788- struct device_attribute *d_attrs_muex;
72789+ device_attribute_no_const *d_attrs_muex;
72790 };
72791
72792 /**
72793diff --git a/include/linux/fb.h b/include/linux/fb.h
72794index d49c60f..2834fbe 100644
72795--- a/include/linux/fb.h
72796+++ b/include/linux/fb.h
72797@@ -304,7 +304,7 @@ struct fb_ops {
72798 /* called at KDB enter and leave time to prepare the console */
72799 int (*fb_debug_enter)(struct fb_info *info);
72800 int (*fb_debug_leave)(struct fb_info *info);
72801-};
72802+} __do_const;
72803
72804 #ifdef CONFIG_FB_TILEBLITTING
72805 #define FB_TILE_CURSOR_NONE 0
72806diff --git a/include/linux/filter.h b/include/linux/filter.h
72807index f65f5a6..2f4f93a 100644
72808--- a/include/linux/filter.h
72809+++ b/include/linux/filter.h
72810@@ -20,6 +20,7 @@ struct compat_sock_fprog {
72811
72812 struct sk_buff;
72813 struct sock;
72814+struct bpf_jit_work;
72815
72816 struct sk_filter
72817 {
72818@@ -27,6 +28,9 @@ struct sk_filter
72819 unsigned int len; /* Number of filter blocks */
72820 unsigned int (*bpf_func)(const struct sk_buff *skb,
72821 const struct sock_filter *filter);
72822+#ifdef CONFIG_BPF_JIT
72823+ struct bpf_jit_work *work;
72824+#endif
72825 struct rcu_head rcu;
72826 struct sock_filter insns[0];
72827 };
72828diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
72829index 8293262..2b3b8bd 100644
72830--- a/include/linux/frontswap.h
72831+++ b/include/linux/frontswap.h
72832@@ -11,7 +11,7 @@ struct frontswap_ops {
72833 int (*load)(unsigned, pgoff_t, struct page *);
72834 void (*invalidate_page)(unsigned, pgoff_t);
72835 void (*invalidate_area)(unsigned);
72836-};
72837+} __no_const;
72838
72839 extern bool frontswap_enabled;
72840 extern struct frontswap_ops *
72841diff --git a/include/linux/fs.h b/include/linux/fs.h
72842index 65c2be2..4c53f6e 100644
72843--- a/include/linux/fs.h
72844+++ b/include/linux/fs.h
72845@@ -1543,7 +1543,8 @@ struct file_operations {
72846 long (*fallocate)(struct file *file, int mode, loff_t offset,
72847 loff_t len);
72848 int (*show_fdinfo)(struct seq_file *m, struct file *f);
72849-};
72850+} __do_const;
72851+typedef struct file_operations __no_const file_operations_no_const;
72852
72853 struct inode_operations {
72854 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
72855@@ -2688,4 +2689,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
72856 inode->i_flags |= S_NOSEC;
72857 }
72858
72859+static inline bool is_sidechannel_device(const struct inode *inode)
72860+{
72861+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
72862+ umode_t mode = inode->i_mode;
72863+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
72864+#else
72865+ return false;
72866+#endif
72867+}
72868+
72869 #endif /* _LINUX_FS_H */
72870diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
72871index 2b93a9a..855d94a 100644
72872--- a/include/linux/fs_struct.h
72873+++ b/include/linux/fs_struct.h
72874@@ -6,7 +6,7 @@
72875 #include <linux/seqlock.h>
72876
72877 struct fs_struct {
72878- int users;
72879+ atomic_t users;
72880 spinlock_t lock;
72881 seqcount_t seq;
72882 int umask;
72883diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
72884index 5dfa0aa..6acf322 100644
72885--- a/include/linux/fscache-cache.h
72886+++ b/include/linux/fscache-cache.h
72887@@ -112,7 +112,7 @@ struct fscache_operation {
72888 fscache_operation_release_t release;
72889 };
72890
72891-extern atomic_t fscache_op_debug_id;
72892+extern atomic_unchecked_t fscache_op_debug_id;
72893 extern void fscache_op_work_func(struct work_struct *work);
72894
72895 extern void fscache_enqueue_operation(struct fscache_operation *);
72896@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
72897 INIT_WORK(&op->work, fscache_op_work_func);
72898 atomic_set(&op->usage, 1);
72899 op->state = FSCACHE_OP_ST_INITIALISED;
72900- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
72901+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
72902 op->processor = processor;
72903 op->release = release;
72904 INIT_LIST_HEAD(&op->pend_link);
72905diff --git a/include/linux/fscache.h b/include/linux/fscache.h
72906index 7a08623..4c07b0f 100644
72907--- a/include/linux/fscache.h
72908+++ b/include/linux/fscache.h
72909@@ -152,7 +152,7 @@ struct fscache_cookie_def {
72910 * - this is mandatory for any object that may have data
72911 */
72912 void (*now_uncached)(void *cookie_netfs_data);
72913-};
72914+} __do_const;
72915
72916 /*
72917 * fscache cached network filesystem type
72918diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
72919index a78680a..87bd73e 100644
72920--- a/include/linux/fsnotify.h
72921+++ b/include/linux/fsnotify.h
72922@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
72923 struct inode *inode = path->dentry->d_inode;
72924 __u32 mask = FS_ACCESS;
72925
72926+ if (is_sidechannel_device(inode))
72927+ return;
72928+
72929 if (S_ISDIR(inode->i_mode))
72930 mask |= FS_ISDIR;
72931
72932@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
72933 struct inode *inode = path->dentry->d_inode;
72934 __u32 mask = FS_MODIFY;
72935
72936+ if (is_sidechannel_device(inode))
72937+ return;
72938+
72939 if (S_ISDIR(inode->i_mode))
72940 mask |= FS_ISDIR;
72941
72942@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
72943 */
72944 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
72945 {
72946- return kstrdup(name, GFP_KERNEL);
72947+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
72948 }
72949
72950 /*
72951diff --git a/include/linux/genhd.h b/include/linux/genhd.h
72952index 9f3c275..911b591 100644
72953--- a/include/linux/genhd.h
72954+++ b/include/linux/genhd.h
72955@@ -194,7 +194,7 @@ struct gendisk {
72956 struct kobject *slave_dir;
72957
72958 struct timer_rand_state *random;
72959- atomic_t sync_io; /* RAID */
72960+ atomic_unchecked_t sync_io; /* RAID */
72961 struct disk_events *ev;
72962 #ifdef CONFIG_BLK_DEV_INTEGRITY
72963 struct blk_integrity *integrity;
72964diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
72965index 023bc34..b02b46a 100644
72966--- a/include/linux/genl_magic_func.h
72967+++ b/include/linux/genl_magic_func.h
72968@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
72969 },
72970
72971 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
72972-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
72973+static struct genl_ops ZZZ_genl_ops[] = {
72974 #include GENL_MAGIC_INCLUDE_FILE
72975 };
72976
72977diff --git a/include/linux/gfp.h b/include/linux/gfp.h
72978index 0f615eb..5c3832f 100644
72979--- a/include/linux/gfp.h
72980+++ b/include/linux/gfp.h
72981@@ -35,6 +35,13 @@ struct vm_area_struct;
72982 #define ___GFP_NO_KSWAPD 0x400000u
72983 #define ___GFP_OTHER_NODE 0x800000u
72984 #define ___GFP_WRITE 0x1000000u
72985+
72986+#ifdef CONFIG_PAX_USERCOPY_SLABS
72987+#define ___GFP_USERCOPY 0x2000000u
72988+#else
72989+#define ___GFP_USERCOPY 0
72990+#endif
72991+
72992 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
72993
72994 /*
72995@@ -92,6 +99,7 @@ struct vm_area_struct;
72996 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
72997 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
72998 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
72999+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
73000
73001 /*
73002 * This may seem redundant, but it's a way of annotating false positives vs.
73003@@ -99,7 +107,7 @@ struct vm_area_struct;
73004 */
73005 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
73006
73007-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
73008+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
73009 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
73010
73011 /* This equals 0, but use constants in case they ever change */
73012@@ -153,6 +161,8 @@ struct vm_area_struct;
73013 /* 4GB DMA on some platforms */
73014 #define GFP_DMA32 __GFP_DMA32
73015
73016+#define GFP_USERCOPY __GFP_USERCOPY
73017+
73018 /* Convert GFP flags to their corresponding migrate type */
73019 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
73020 {
73021diff --git a/include/linux/gracl.h b/include/linux/gracl.h
73022new file mode 100644
73023index 0000000..ebe6d72
73024--- /dev/null
73025+++ b/include/linux/gracl.h
73026@@ -0,0 +1,319 @@
73027+#ifndef GR_ACL_H
73028+#define GR_ACL_H
73029+
73030+#include <linux/grdefs.h>
73031+#include <linux/resource.h>
73032+#include <linux/capability.h>
73033+#include <linux/dcache.h>
73034+#include <asm/resource.h>
73035+
73036+/* Major status information */
73037+
73038+#define GR_VERSION "grsecurity 2.9.1"
73039+#define GRSECURITY_VERSION 0x2901
73040+
73041+enum {
73042+ GR_SHUTDOWN = 0,
73043+ GR_ENABLE = 1,
73044+ GR_SPROLE = 2,
73045+ GR_RELOAD = 3,
73046+ GR_SEGVMOD = 4,
73047+ GR_STATUS = 5,
73048+ GR_UNSPROLE = 6,
73049+ GR_PASSSET = 7,
73050+ GR_SPROLEPAM = 8,
73051+};
73052+
73053+/* Password setup definitions
73054+ * kernel/grhash.c */
73055+enum {
73056+ GR_PW_LEN = 128,
73057+ GR_SALT_LEN = 16,
73058+ GR_SHA_LEN = 32,
73059+};
73060+
73061+enum {
73062+ GR_SPROLE_LEN = 64,
73063+};
73064+
73065+enum {
73066+ GR_NO_GLOB = 0,
73067+ GR_REG_GLOB,
73068+ GR_CREATE_GLOB
73069+};
73070+
73071+#define GR_NLIMITS 32
73072+
73073+/* Begin Data Structures */
73074+
73075+struct sprole_pw {
73076+ unsigned char *rolename;
73077+ unsigned char salt[GR_SALT_LEN];
73078+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
73079+};
73080+
73081+struct name_entry {
73082+ __u32 key;
73083+ ino_t inode;
73084+ dev_t device;
73085+ char *name;
73086+ __u16 len;
73087+ __u8 deleted;
73088+ struct name_entry *prev;
73089+ struct name_entry *next;
73090+};
73091+
73092+struct inodev_entry {
73093+ struct name_entry *nentry;
73094+ struct inodev_entry *prev;
73095+ struct inodev_entry *next;
73096+};
73097+
73098+struct acl_role_db {
73099+ struct acl_role_label **r_hash;
73100+ __u32 r_size;
73101+};
73102+
73103+struct inodev_db {
73104+ struct inodev_entry **i_hash;
73105+ __u32 i_size;
73106+};
73107+
73108+struct name_db {
73109+ struct name_entry **n_hash;
73110+ __u32 n_size;
73111+};
73112+
73113+struct crash_uid {
73114+ uid_t uid;
73115+ unsigned long expires;
73116+};
73117+
73118+struct gr_hash_struct {
73119+ void **table;
73120+ void **nametable;
73121+ void *first;
73122+ __u32 table_size;
73123+ __u32 used_size;
73124+ int type;
73125+};
73126+
73127+/* Userspace Grsecurity ACL data structures */
73128+
73129+struct acl_subject_label {
73130+ char *filename;
73131+ ino_t inode;
73132+ dev_t device;
73133+ __u32 mode;
73134+ kernel_cap_t cap_mask;
73135+ kernel_cap_t cap_lower;
73136+ kernel_cap_t cap_invert_audit;
73137+
73138+ struct rlimit res[GR_NLIMITS];
73139+ __u32 resmask;
73140+
73141+ __u8 user_trans_type;
73142+ __u8 group_trans_type;
73143+ uid_t *user_transitions;
73144+ gid_t *group_transitions;
73145+ __u16 user_trans_num;
73146+ __u16 group_trans_num;
73147+
73148+ __u32 sock_families[2];
73149+ __u32 ip_proto[8];
73150+ __u32 ip_type;
73151+ struct acl_ip_label **ips;
73152+ __u32 ip_num;
73153+ __u32 inaddr_any_override;
73154+
73155+ __u32 crashes;
73156+ unsigned long expires;
73157+
73158+ struct acl_subject_label *parent_subject;
73159+ struct gr_hash_struct *hash;
73160+ struct acl_subject_label *prev;
73161+ struct acl_subject_label *next;
73162+
73163+ struct acl_object_label **obj_hash;
73164+ __u32 obj_hash_size;
73165+ __u16 pax_flags;
73166+};
73167+
73168+struct role_allowed_ip {
73169+ __u32 addr;
73170+ __u32 netmask;
73171+
73172+ struct role_allowed_ip *prev;
73173+ struct role_allowed_ip *next;
73174+};
73175+
73176+struct role_transition {
73177+ char *rolename;
73178+
73179+ struct role_transition *prev;
73180+ struct role_transition *next;
73181+};
73182+
73183+struct acl_role_label {
73184+ char *rolename;
73185+ uid_t uidgid;
73186+ __u16 roletype;
73187+
73188+ __u16 auth_attempts;
73189+ unsigned long expires;
73190+
73191+ struct acl_subject_label *root_label;
73192+ struct gr_hash_struct *hash;
73193+
73194+ struct acl_role_label *prev;
73195+ struct acl_role_label *next;
73196+
73197+ struct role_transition *transitions;
73198+ struct role_allowed_ip *allowed_ips;
73199+ uid_t *domain_children;
73200+ __u16 domain_child_num;
73201+
73202+ umode_t umask;
73203+
73204+ struct acl_subject_label **subj_hash;
73205+ __u32 subj_hash_size;
73206+};
73207+
73208+struct user_acl_role_db {
73209+ struct acl_role_label **r_table;
73210+ __u32 num_pointers; /* Number of allocations to track */
73211+ __u32 num_roles; /* Number of roles */
73212+ __u32 num_domain_children; /* Number of domain children */
73213+ __u32 num_subjects; /* Number of subjects */
73214+ __u32 num_objects; /* Number of objects */
73215+};
73216+
73217+struct acl_object_label {
73218+ char *filename;
73219+ ino_t inode;
73220+ dev_t device;
73221+ __u32 mode;
73222+
73223+ struct acl_subject_label *nested;
73224+ struct acl_object_label *globbed;
73225+
73226+ /* next two structures not used */
73227+
73228+ struct acl_object_label *prev;
73229+ struct acl_object_label *next;
73230+};
73231+
73232+struct acl_ip_label {
73233+ char *iface;
73234+ __u32 addr;
73235+ __u32 netmask;
73236+ __u16 low, high;
73237+ __u8 mode;
73238+ __u32 type;
73239+ __u32 proto[8];
73240+
73241+ /* next two structures not used */
73242+
73243+ struct acl_ip_label *prev;
73244+ struct acl_ip_label *next;
73245+};
73246+
73247+struct gr_arg {
73248+ struct user_acl_role_db role_db;
73249+ unsigned char pw[GR_PW_LEN];
73250+ unsigned char salt[GR_SALT_LEN];
73251+ unsigned char sum[GR_SHA_LEN];
73252+ unsigned char sp_role[GR_SPROLE_LEN];
73253+ struct sprole_pw *sprole_pws;
73254+ dev_t segv_device;
73255+ ino_t segv_inode;
73256+ uid_t segv_uid;
73257+ __u16 num_sprole_pws;
73258+ __u16 mode;
73259+};
73260+
73261+struct gr_arg_wrapper {
73262+ struct gr_arg *arg;
73263+ __u32 version;
73264+ __u32 size;
73265+};
73266+
73267+struct subject_map {
73268+ struct acl_subject_label *user;
73269+ struct acl_subject_label *kernel;
73270+ struct subject_map *prev;
73271+ struct subject_map *next;
73272+};
73273+
73274+struct acl_subj_map_db {
73275+ struct subject_map **s_hash;
73276+ __u32 s_size;
73277+};
73278+
73279+/* End Data Structures Section */
73280+
73281+/* Hash functions generated by empirical testing by Brad Spengler
73282+ Makes good use of the low bits of the inode. Generally 0-1 times
73283+ in loop for successful match. 0-3 for unsuccessful match.
73284+ Shift/add algorithm with modulus of table size and an XOR*/
73285+
73286+static __inline__ unsigned int
73287+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
73288+{
73289+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
73290+}
73291+
73292+ static __inline__ unsigned int
73293+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
73294+{
73295+ return ((const unsigned long)userp % sz);
73296+}
73297+
73298+static __inline__ unsigned int
73299+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
73300+{
73301+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
73302+}
73303+
73304+static __inline__ unsigned int
73305+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
73306+{
73307+ return full_name_hash((const unsigned char *)name, len) % sz;
73308+}
73309+
73310+#define FOR_EACH_ROLE_START(role) \
73311+ role = role_list; \
73312+ while (role) {
73313+
73314+#define FOR_EACH_ROLE_END(role) \
73315+ role = role->prev; \
73316+ }
73317+
73318+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
73319+ subj = NULL; \
73320+ iter = 0; \
73321+ while (iter < role->subj_hash_size) { \
73322+ if (subj == NULL) \
73323+ subj = role->subj_hash[iter]; \
73324+ if (subj == NULL) { \
73325+ iter++; \
73326+ continue; \
73327+ }
73328+
73329+#define FOR_EACH_SUBJECT_END(subj,iter) \
73330+ subj = subj->next; \
73331+ if (subj == NULL) \
73332+ iter++; \
73333+ }
73334+
73335+
73336+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
73337+ subj = role->hash->first; \
73338+ while (subj != NULL) {
73339+
73340+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
73341+ subj = subj->next; \
73342+ }
73343+
73344+#endif
73345+
73346diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
73347new file mode 100644
73348index 0000000..33ebd1f
73349--- /dev/null
73350+++ b/include/linux/gracl_compat.h
73351@@ -0,0 +1,156 @@
73352+#ifndef GR_ACL_COMPAT_H
73353+#define GR_ACL_COMPAT_H
73354+
73355+#include <linux/resource.h>
73356+#include <asm/resource.h>
73357+
73358+struct sprole_pw_compat {
73359+ compat_uptr_t rolename;
73360+ unsigned char salt[GR_SALT_LEN];
73361+ unsigned char sum[GR_SHA_LEN];
73362+};
73363+
73364+struct gr_hash_struct_compat {
73365+ compat_uptr_t table;
73366+ compat_uptr_t nametable;
73367+ compat_uptr_t first;
73368+ __u32 table_size;
73369+ __u32 used_size;
73370+ int type;
73371+};
73372+
73373+struct acl_subject_label_compat {
73374+ compat_uptr_t filename;
73375+ compat_ino_t inode;
73376+ __u32 device;
73377+ __u32 mode;
73378+ kernel_cap_t cap_mask;
73379+ kernel_cap_t cap_lower;
73380+ kernel_cap_t cap_invert_audit;
73381+
73382+ struct compat_rlimit res[GR_NLIMITS];
73383+ __u32 resmask;
73384+
73385+ __u8 user_trans_type;
73386+ __u8 group_trans_type;
73387+ compat_uptr_t user_transitions;
73388+ compat_uptr_t group_transitions;
73389+ __u16 user_trans_num;
73390+ __u16 group_trans_num;
73391+
73392+ __u32 sock_families[2];
73393+ __u32 ip_proto[8];
73394+ __u32 ip_type;
73395+ compat_uptr_t ips;
73396+ __u32 ip_num;
73397+ __u32 inaddr_any_override;
73398+
73399+ __u32 crashes;
73400+ compat_ulong_t expires;
73401+
73402+ compat_uptr_t parent_subject;
73403+ compat_uptr_t hash;
73404+ compat_uptr_t prev;
73405+ compat_uptr_t next;
73406+
73407+ compat_uptr_t obj_hash;
73408+ __u32 obj_hash_size;
73409+ __u16 pax_flags;
73410+};
73411+
73412+struct role_allowed_ip_compat {
73413+ __u32 addr;
73414+ __u32 netmask;
73415+
73416+ compat_uptr_t prev;
73417+ compat_uptr_t next;
73418+};
73419+
73420+struct role_transition_compat {
73421+ compat_uptr_t rolename;
73422+
73423+ compat_uptr_t prev;
73424+ compat_uptr_t next;
73425+};
73426+
73427+struct acl_role_label_compat {
73428+ compat_uptr_t rolename;
73429+ uid_t uidgid;
73430+ __u16 roletype;
73431+
73432+ __u16 auth_attempts;
73433+ compat_ulong_t expires;
73434+
73435+ compat_uptr_t root_label;
73436+ compat_uptr_t hash;
73437+
73438+ compat_uptr_t prev;
73439+ compat_uptr_t next;
73440+
73441+ compat_uptr_t transitions;
73442+ compat_uptr_t allowed_ips;
73443+ compat_uptr_t domain_children;
73444+ __u16 domain_child_num;
73445+
73446+ umode_t umask;
73447+
73448+ compat_uptr_t subj_hash;
73449+ __u32 subj_hash_size;
73450+};
73451+
73452+struct user_acl_role_db_compat {
73453+ compat_uptr_t r_table;
73454+ __u32 num_pointers;
73455+ __u32 num_roles;
73456+ __u32 num_domain_children;
73457+ __u32 num_subjects;
73458+ __u32 num_objects;
73459+};
73460+
73461+struct acl_object_label_compat {
73462+ compat_uptr_t filename;
73463+ compat_ino_t inode;
73464+ __u32 device;
73465+ __u32 mode;
73466+
73467+ compat_uptr_t nested;
73468+ compat_uptr_t globbed;
73469+
73470+ compat_uptr_t prev;
73471+ compat_uptr_t next;
73472+};
73473+
73474+struct acl_ip_label_compat {
73475+ compat_uptr_t iface;
73476+ __u32 addr;
73477+ __u32 netmask;
73478+ __u16 low, high;
73479+ __u8 mode;
73480+ __u32 type;
73481+ __u32 proto[8];
73482+
73483+ compat_uptr_t prev;
73484+ compat_uptr_t next;
73485+};
73486+
73487+struct gr_arg_compat {
73488+ struct user_acl_role_db_compat role_db;
73489+ unsigned char pw[GR_PW_LEN];
73490+ unsigned char salt[GR_SALT_LEN];
73491+ unsigned char sum[GR_SHA_LEN];
73492+ unsigned char sp_role[GR_SPROLE_LEN];
73493+ compat_uptr_t sprole_pws;
73494+ __u32 segv_device;
73495+ compat_ino_t segv_inode;
73496+ uid_t segv_uid;
73497+ __u16 num_sprole_pws;
73498+ __u16 mode;
73499+};
73500+
73501+struct gr_arg_wrapper_compat {
73502+ compat_uptr_t arg;
73503+ __u32 version;
73504+ __u32 size;
73505+};
73506+
73507+#endif
73508diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
73509new file mode 100644
73510index 0000000..323ecf2
73511--- /dev/null
73512+++ b/include/linux/gralloc.h
73513@@ -0,0 +1,9 @@
73514+#ifndef __GRALLOC_H
73515+#define __GRALLOC_H
73516+
73517+void acl_free_all(void);
73518+int acl_alloc_stack_init(unsigned long size);
73519+void *acl_alloc(unsigned long len);
73520+void *acl_alloc_num(unsigned long num, unsigned long len);
73521+
73522+#endif
73523diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
73524new file mode 100644
73525index 0000000..be66033
73526--- /dev/null
73527+++ b/include/linux/grdefs.h
73528@@ -0,0 +1,140 @@
73529+#ifndef GRDEFS_H
73530+#define GRDEFS_H
73531+
73532+/* Begin grsecurity status declarations */
73533+
73534+enum {
73535+ GR_READY = 0x01,
73536+ GR_STATUS_INIT = 0x00 // disabled state
73537+};
73538+
73539+/* Begin ACL declarations */
73540+
73541+/* Role flags */
73542+
73543+enum {
73544+ GR_ROLE_USER = 0x0001,
73545+ GR_ROLE_GROUP = 0x0002,
73546+ GR_ROLE_DEFAULT = 0x0004,
73547+ GR_ROLE_SPECIAL = 0x0008,
73548+ GR_ROLE_AUTH = 0x0010,
73549+ GR_ROLE_NOPW = 0x0020,
73550+ GR_ROLE_GOD = 0x0040,
73551+ GR_ROLE_LEARN = 0x0080,
73552+ GR_ROLE_TPE = 0x0100,
73553+ GR_ROLE_DOMAIN = 0x0200,
73554+ GR_ROLE_PAM = 0x0400,
73555+ GR_ROLE_PERSIST = 0x0800
73556+};
73557+
73558+/* ACL Subject and Object mode flags */
73559+enum {
73560+ GR_DELETED = 0x80000000
73561+};
73562+
73563+/* ACL Object-only mode flags */
73564+enum {
73565+ GR_READ = 0x00000001,
73566+ GR_APPEND = 0x00000002,
73567+ GR_WRITE = 0x00000004,
73568+ GR_EXEC = 0x00000008,
73569+ GR_FIND = 0x00000010,
73570+ GR_INHERIT = 0x00000020,
73571+ GR_SETID = 0x00000040,
73572+ GR_CREATE = 0x00000080,
73573+ GR_DELETE = 0x00000100,
73574+ GR_LINK = 0x00000200,
73575+ GR_AUDIT_READ = 0x00000400,
73576+ GR_AUDIT_APPEND = 0x00000800,
73577+ GR_AUDIT_WRITE = 0x00001000,
73578+ GR_AUDIT_EXEC = 0x00002000,
73579+ GR_AUDIT_FIND = 0x00004000,
73580+ GR_AUDIT_INHERIT= 0x00008000,
73581+ GR_AUDIT_SETID = 0x00010000,
73582+ GR_AUDIT_CREATE = 0x00020000,
73583+ GR_AUDIT_DELETE = 0x00040000,
73584+ GR_AUDIT_LINK = 0x00080000,
73585+ GR_PTRACERD = 0x00100000,
73586+ GR_NOPTRACE = 0x00200000,
73587+ GR_SUPPRESS = 0x00400000,
73588+ GR_NOLEARN = 0x00800000,
73589+ GR_INIT_TRANSFER= 0x01000000
73590+};
73591+
73592+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
73593+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
73594+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
73595+
73596+/* ACL subject-only mode flags */
73597+enum {
73598+ GR_KILL = 0x00000001,
73599+ GR_VIEW = 0x00000002,
73600+ GR_PROTECTED = 0x00000004,
73601+ GR_LEARN = 0x00000008,
73602+ GR_OVERRIDE = 0x00000010,
73603+ /* just a placeholder, this mode is only used in userspace */
73604+ GR_DUMMY = 0x00000020,
73605+ GR_PROTSHM = 0x00000040,
73606+ GR_KILLPROC = 0x00000080,
73607+ GR_KILLIPPROC = 0x00000100,
73608+ /* just a placeholder, this mode is only used in userspace */
73609+ GR_NOTROJAN = 0x00000200,
73610+ GR_PROTPROCFD = 0x00000400,
73611+ GR_PROCACCT = 0x00000800,
73612+ GR_RELAXPTRACE = 0x00001000,
73613+ //GR_NESTED = 0x00002000,
73614+ GR_INHERITLEARN = 0x00004000,
73615+ GR_PROCFIND = 0x00008000,
73616+ GR_POVERRIDE = 0x00010000,
73617+ GR_KERNELAUTH = 0x00020000,
73618+ GR_ATSECURE = 0x00040000,
73619+ GR_SHMEXEC = 0x00080000
73620+};
73621+
73622+enum {
73623+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
73624+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
73625+ GR_PAX_ENABLE_MPROTECT = 0x0004,
73626+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
73627+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
73628+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
73629+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
73630+ GR_PAX_DISABLE_MPROTECT = 0x0400,
73631+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
73632+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
73633+};
73634+
73635+enum {
73636+ GR_ID_USER = 0x01,
73637+ GR_ID_GROUP = 0x02,
73638+};
73639+
73640+enum {
73641+ GR_ID_ALLOW = 0x01,
73642+ GR_ID_DENY = 0x02,
73643+};
73644+
73645+#define GR_CRASH_RES 31
73646+#define GR_UIDTABLE_MAX 500
73647+
73648+/* begin resource learning section */
73649+enum {
73650+ GR_RLIM_CPU_BUMP = 60,
73651+ GR_RLIM_FSIZE_BUMP = 50000,
73652+ GR_RLIM_DATA_BUMP = 10000,
73653+ GR_RLIM_STACK_BUMP = 1000,
73654+ GR_RLIM_CORE_BUMP = 10000,
73655+ GR_RLIM_RSS_BUMP = 500000,
73656+ GR_RLIM_NPROC_BUMP = 1,
73657+ GR_RLIM_NOFILE_BUMP = 5,
73658+ GR_RLIM_MEMLOCK_BUMP = 50000,
73659+ GR_RLIM_AS_BUMP = 500000,
73660+ GR_RLIM_LOCKS_BUMP = 2,
73661+ GR_RLIM_SIGPENDING_BUMP = 5,
73662+ GR_RLIM_MSGQUEUE_BUMP = 10000,
73663+ GR_RLIM_NICE_BUMP = 1,
73664+ GR_RLIM_RTPRIO_BUMP = 1,
73665+ GR_RLIM_RTTIME_BUMP = 1000000
73666+};
73667+
73668+#endif
73669diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
73670new file mode 100644
e2b79cd1 73671index 0000000..e337683
bb5f0bf8
AF
73672--- /dev/null
73673+++ b/include/linux/grinternal.h
e2b79cd1 73674@@ -0,0 +1,229 @@
bb5f0bf8
AF
73675+#ifndef __GRINTERNAL_H
73676+#define __GRINTERNAL_H
73677+
73678+#ifdef CONFIG_GRKERNSEC
73679+
73680+#include <linux/fs.h>
73681+#include <linux/mnt_namespace.h>
73682+#include <linux/nsproxy.h>
73683+#include <linux/gracl.h>
73684+#include <linux/grdefs.h>
73685+#include <linux/grmsg.h>
73686+
73687+void gr_add_learn_entry(const char *fmt, ...)
73688+ __attribute__ ((format (printf, 1, 2)));
73689+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
73690+ const struct vfsmount *mnt);
73691+__u32 gr_check_create(const struct dentry *new_dentry,
73692+ const struct dentry *parent,
73693+ const struct vfsmount *mnt, const __u32 mode);
73694+int gr_check_protected_task(const struct task_struct *task);
73695+__u32 to_gr_audit(const __u32 reqmode);
73696+int gr_set_acls(const int type);
73697+int gr_apply_subject_to_task(struct task_struct *task);
73698+int gr_acl_is_enabled(void);
73699+char gr_roletype_to_char(void);
73700+
73701+void gr_handle_alertkill(struct task_struct *task);
73702+char *gr_to_filename(const struct dentry *dentry,
73703+ const struct vfsmount *mnt);
73704+char *gr_to_filename1(const struct dentry *dentry,
73705+ const struct vfsmount *mnt);
73706+char *gr_to_filename2(const struct dentry *dentry,
73707+ const struct vfsmount *mnt);
73708+char *gr_to_filename3(const struct dentry *dentry,
73709+ const struct vfsmount *mnt);
73710+
73711+extern int grsec_enable_ptrace_readexec;
73712+extern int grsec_enable_harden_ptrace;
73713+extern int grsec_enable_link;
73714+extern int grsec_enable_fifo;
73715+extern int grsec_enable_execve;
73716+extern int grsec_enable_shm;
73717+extern int grsec_enable_execlog;
73718+extern int grsec_enable_signal;
73719+extern int grsec_enable_audit_ptrace;
73720+extern int grsec_enable_forkfail;
73721+extern int grsec_enable_time;
73722+extern int grsec_enable_rofs;
e2b79cd1 73723+extern int grsec_deny_new_usb;
bb5f0bf8
AF
73724+extern int grsec_enable_chroot_shmat;
73725+extern int grsec_enable_chroot_mount;
73726+extern int grsec_enable_chroot_double;
73727+extern int grsec_enable_chroot_pivot;
73728+extern int grsec_enable_chroot_chdir;
73729+extern int grsec_enable_chroot_chmod;
73730+extern int grsec_enable_chroot_mknod;
73731+extern int grsec_enable_chroot_fchdir;
73732+extern int grsec_enable_chroot_nice;
73733+extern int grsec_enable_chroot_execlog;
73734+extern int grsec_enable_chroot_caps;
73735+extern int grsec_enable_chroot_sysctl;
73736+extern int grsec_enable_chroot_unix;
73737+extern int grsec_enable_symlinkown;
73738+extern kgid_t grsec_symlinkown_gid;
73739+extern int grsec_enable_tpe;
73740+extern kgid_t grsec_tpe_gid;
73741+extern int grsec_enable_tpe_all;
73742+extern int grsec_enable_tpe_invert;
73743+extern int grsec_enable_socket_all;
73744+extern kgid_t grsec_socket_all_gid;
73745+extern int grsec_enable_socket_client;
73746+extern kgid_t grsec_socket_client_gid;
73747+extern int grsec_enable_socket_server;
73748+extern kgid_t grsec_socket_server_gid;
73749+extern kgid_t grsec_audit_gid;
73750+extern int grsec_enable_group;
73751+extern int grsec_enable_log_rwxmaps;
73752+extern int grsec_enable_mount;
73753+extern int grsec_enable_chdir;
73754+extern int grsec_resource_logging;
73755+extern int grsec_enable_blackhole;
73756+extern int grsec_lastack_retries;
73757+extern int grsec_enable_brute;
73758+extern int grsec_lock;
73759+
73760+extern spinlock_t grsec_alert_lock;
73761+extern unsigned long grsec_alert_wtime;
73762+extern unsigned long grsec_alert_fyet;
73763+
73764+extern spinlock_t grsec_audit_lock;
73765+
73766+extern rwlock_t grsec_exec_file_lock;
73767+
73768+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
73769+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
73770+ (tsk)->exec_file->f_path.mnt) : "/")
73771+
73772+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
73773+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
73774+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
73775+
73776+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
73777+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
73778+ (tsk)->exec_file->f_path.mnt) : "/")
73779+
73780+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
73781+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
73782+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
73783+
73784+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
73785+
73786+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
73787+
73788+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
73789+{
73790+ if (file1 && file2) {
73791+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
73792+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
73793+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
73794+ return true;
73795+ }
73796+
73797+ return false;
73798+}
73799+
73800+#define GR_CHROOT_CAPS {{ \
73801+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
73802+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
73803+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
73804+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
73805+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
73806+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
73807+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
73808+
73809+#define security_learn(normal_msg,args...) \
73810+({ \
73811+ read_lock(&grsec_exec_file_lock); \
73812+ gr_add_learn_entry(normal_msg "\n", ## args); \
73813+ read_unlock(&grsec_exec_file_lock); \
73814+})
73815+
73816+enum {
73817+ GR_DO_AUDIT,
73818+ GR_DONT_AUDIT,
73819+ /* used for non-audit messages that we shouldn't kill the task on */
73820+ GR_DONT_AUDIT_GOOD
73821+};
73822+
73823+enum {
73824+ GR_TTYSNIFF,
73825+ GR_RBAC,
73826+ GR_RBAC_STR,
73827+ GR_STR_RBAC,
73828+ GR_RBAC_MODE2,
73829+ GR_RBAC_MODE3,
73830+ GR_FILENAME,
73831+ GR_SYSCTL_HIDDEN,
73832+ GR_NOARGS,
73833+ GR_ONE_INT,
73834+ GR_ONE_INT_TWO_STR,
73835+ GR_ONE_STR,
73836+ GR_STR_INT,
73837+ GR_TWO_STR_INT,
73838+ GR_TWO_INT,
73839+ GR_TWO_U64,
73840+ GR_THREE_INT,
73841+ GR_FIVE_INT_TWO_STR,
73842+ GR_TWO_STR,
73843+ GR_THREE_STR,
73844+ GR_FOUR_STR,
73845+ GR_STR_FILENAME,
73846+ GR_FILENAME_STR,
73847+ GR_FILENAME_TWO_INT,
73848+ GR_FILENAME_TWO_INT_STR,
73849+ GR_TEXTREL,
73850+ GR_PTRACE,
73851+ GR_RESOURCE,
73852+ GR_CAP,
73853+ GR_SIG,
73854+ GR_SIG2,
73855+ GR_CRASH1,
73856+ GR_CRASH2,
73857+ GR_PSACCT,
73858+ GR_RWXMAP,
73859+ GR_RWXMAPVMA
73860+};
73861+
73862+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
73863+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
73864+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
73865+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
73866+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
73867+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
73868+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
73869+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
73870+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
73871+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
73872+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
73873+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
73874+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
73875+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
73876+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
73877+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
73878+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
73879+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
73880+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
73881+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
73882+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
73883+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
73884+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
73885+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
73886+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
73887+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
73888+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
73889+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
73890+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
73891+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
73892+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
73893+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
73894+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
73895+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
73896+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
73897+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
73898+
73899+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
73900+
73901+#endif
73902+
73903+#endif
73904diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
73905new file mode 100644
73906index 0000000..a4396b5
73907--- /dev/null
73908+++ b/include/linux/grmsg.h
73909@@ -0,0 +1,113 @@
73910+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
73911+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
73912+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
73913+#define GR_STOPMOD_MSG "denied modification of module state by "
73914+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
73915+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
73916+#define GR_IOPERM_MSG "denied use of ioperm() by "
73917+#define GR_IOPL_MSG "denied use of iopl() by "
73918+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
73919+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
73920+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
73921+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
73922+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
73923+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
73924+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
73925+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
73926+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
73927+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
73928+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
73929+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
73930+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
73931+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
73932+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
73933+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
73934+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
73935+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
73936+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
73937+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
73938+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
73939+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
73940+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
73941+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
73942+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
73943+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
73944+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
73945+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
73946+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
73947+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
73948+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
73949+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
73950+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
73951+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
73952+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
73953+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
73954+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
73955+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
73956+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
73957+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
73958+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
73959+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
73960+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
73961+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
73962+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
73963+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
73964+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
73965+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
73966+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
73967+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
73968+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
73969+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
73970+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
73971+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
73972+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
73973+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
73974+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
73975+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
73976+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
73977+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
73978+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
73979+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
73980+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
73981+#define GR_FAILFORK_MSG "failed fork with errno %s by "
73982+#define GR_NICE_CHROOT_MSG "denied priority change by "
73983+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
73984+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
73985+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
73986+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
73987+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
73988+#define GR_TIME_MSG "time set by "
73989+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
73990+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
73991+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
73992+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
73993+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
73994+#define GR_BIND_MSG "denied bind() by "
73995+#define GR_CONNECT_MSG "denied connect() by "
73996+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
73997+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
73998+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
73999+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
74000+#define GR_CAP_ACL_MSG "use of %s denied for "
74001+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
74002+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
74003+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
74004+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
74005+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
74006+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
74007+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
74008+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
74009+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
74010+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
74011+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
74012+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
74013+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
74014+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
74015+#define GR_VM86_MSG "denied use of vm86 by "
74016+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
74017+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
74018+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
74019+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
74020+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
74021+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
74022+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
74023diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
74024new file mode 100644
e2b79cd1 74025index 0000000..d6f5a21
bb5f0bf8
AF
74026--- /dev/null
74027+++ b/include/linux/grsecurity.h
e2b79cd1 74028@@ -0,0 +1,244 @@
bb5f0bf8
AF
74029+#ifndef GR_SECURITY_H
74030+#define GR_SECURITY_H
74031+#include <linux/fs.h>
74032+#include <linux/fs_struct.h>
74033+#include <linux/binfmts.h>
74034+#include <linux/gracl.h>
74035+
74036+/* notify of brain-dead configs */
74037+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74038+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
74039+#endif
74040+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
74041+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
74042+#endif
74043+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
74044+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
74045+#endif
74046+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
74047+#error "CONFIG_PAX enabled, but no PaX options are enabled."
74048+#endif
74049+
e2b79cd1
AF
74050+int gr_handle_new_usb(void);
74051+
bb5f0bf8
AF
74052+void gr_handle_brute_attach(unsigned long mm_flags);
74053+void gr_handle_brute_check(void);
74054+void gr_handle_kernel_exploit(void);
74055+
74056+char gr_roletype_to_char(void);
74057+
74058+int gr_acl_enable_at_secure(void);
74059+
74060+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
74061+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
74062+
74063+void gr_del_task_from_ip_table(struct task_struct *p);
74064+
74065+int gr_pid_is_chrooted(struct task_struct *p);
74066+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
74067+int gr_handle_chroot_nice(void);
74068+int gr_handle_chroot_sysctl(const int op);
74069+int gr_handle_chroot_setpriority(struct task_struct *p,
74070+ const int niceval);
74071+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
74072+int gr_handle_chroot_chroot(const struct dentry *dentry,
74073+ const struct vfsmount *mnt);
74074+void gr_handle_chroot_chdir(const struct path *path);
74075+int gr_handle_chroot_chmod(const struct dentry *dentry,
74076+ const struct vfsmount *mnt, const int mode);
74077+int gr_handle_chroot_mknod(const struct dentry *dentry,
74078+ const struct vfsmount *mnt, const int mode);
74079+int gr_handle_chroot_mount(const struct dentry *dentry,
74080+ const struct vfsmount *mnt,
74081+ const char *dev_name);
74082+int gr_handle_chroot_pivot(void);
74083+int gr_handle_chroot_unix(const pid_t pid);
74084+
74085+int gr_handle_rawio(const struct inode *inode);
74086+
74087+void gr_handle_ioperm(void);
74088+void gr_handle_iopl(void);
74089+
74090+umode_t gr_acl_umask(void);
74091+
74092+int gr_tpe_allow(const struct file *file);
74093+
74094+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
74095+void gr_clear_chroot_entries(struct task_struct *task);
74096+
74097+void gr_log_forkfail(const int retval);
74098+void gr_log_timechange(void);
74099+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
74100+void gr_log_chdir(const struct dentry *dentry,
74101+ const struct vfsmount *mnt);
74102+void gr_log_chroot_exec(const struct dentry *dentry,
74103+ const struct vfsmount *mnt);
74104+void gr_log_remount(const char *devname, const int retval);
74105+void gr_log_unmount(const char *devname, const int retval);
74106+void gr_log_mount(const char *from, const char *to, const int retval);
74107+void gr_log_textrel(struct vm_area_struct *vma);
74108+void gr_log_ptgnustack(struct file *file);
74109+void gr_log_rwxmmap(struct file *file);
74110+void gr_log_rwxmprotect(struct vm_area_struct *vma);
74111+
74112+int gr_handle_follow_link(const struct inode *parent,
74113+ const struct inode *inode,
74114+ const struct dentry *dentry,
74115+ const struct vfsmount *mnt);
74116+int gr_handle_fifo(const struct dentry *dentry,
74117+ const struct vfsmount *mnt,
74118+ const struct dentry *dir, const int flag,
74119+ const int acc_mode);
74120+int gr_handle_hardlink(const struct dentry *dentry,
74121+ const struct vfsmount *mnt,
74122+ struct inode *inode,
74123+ const int mode, const struct filename *to);
74124+
74125+int gr_is_capable(const int cap);
74126+int gr_is_capable_nolog(const int cap);
74127+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
74128+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
74129+
74130+void gr_copy_label(struct task_struct *tsk);
74131+void gr_handle_crash(struct task_struct *task, const int sig);
74132+int gr_handle_signal(const struct task_struct *p, const int sig);
74133+int gr_check_crash_uid(const kuid_t uid);
74134+int gr_check_protected_task(const struct task_struct *task);
74135+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
74136+int gr_acl_handle_mmap(const struct file *file,
74137+ const unsigned long prot);
74138+int gr_acl_handle_mprotect(const struct file *file,
74139+ const unsigned long prot);
74140+int gr_check_hidden_task(const struct task_struct *tsk);
74141+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
74142+ const struct vfsmount *mnt);
74143+__u32 gr_acl_handle_utime(const struct dentry *dentry,
74144+ const struct vfsmount *mnt);
74145+__u32 gr_acl_handle_access(const struct dentry *dentry,
74146+ const struct vfsmount *mnt, const int fmode);
74147+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
74148+ const struct vfsmount *mnt, umode_t *mode);
74149+__u32 gr_acl_handle_chown(const struct dentry *dentry,
74150+ const struct vfsmount *mnt);
74151+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
74152+ const struct vfsmount *mnt);
74153+int gr_handle_ptrace(struct task_struct *task, const long request);
74154+int gr_handle_proc_ptrace(struct task_struct *task);
74155+__u32 gr_acl_handle_execve(const struct dentry *dentry,
74156+ const struct vfsmount *mnt);
74157+int gr_check_crash_exec(const struct file *filp);
74158+int gr_acl_is_enabled(void);
74159+void gr_set_kernel_label(struct task_struct *task);
74160+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
74161+ const kgid_t gid);
74162+int gr_set_proc_label(const struct dentry *dentry,
74163+ const struct vfsmount *mnt,
74164+ const int unsafe_flags);
74165+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
74166+ const struct vfsmount *mnt);
74167+__u32 gr_acl_handle_open(const struct dentry *dentry,
74168+ const struct vfsmount *mnt, int acc_mode);
74169+__u32 gr_acl_handle_creat(const struct dentry *dentry,
74170+ const struct dentry *p_dentry,
74171+ const struct vfsmount *p_mnt,
74172+ int open_flags, int acc_mode, const int imode);
74173+void gr_handle_create(const struct dentry *dentry,
74174+ const struct vfsmount *mnt);
74175+void gr_handle_proc_create(const struct dentry *dentry,
74176+ const struct inode *inode);
74177+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
74178+ const struct dentry *parent_dentry,
74179+ const struct vfsmount *parent_mnt,
74180+ const int mode);
74181+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
74182+ const struct dentry *parent_dentry,
74183+ const struct vfsmount *parent_mnt);
74184+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
74185+ const struct vfsmount *mnt);
74186+void gr_handle_delete(const ino_t ino, const dev_t dev);
74187+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
74188+ const struct vfsmount *mnt);
74189+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
74190+ const struct dentry *parent_dentry,
74191+ const struct vfsmount *parent_mnt,
74192+ const struct filename *from);
74193+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
74194+ const struct dentry *parent_dentry,
74195+ const struct vfsmount *parent_mnt,
74196+ const struct dentry *old_dentry,
74197+ const struct vfsmount *old_mnt, const struct filename *to);
74198+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
74199+int gr_acl_handle_rename(struct dentry *new_dentry,
74200+ struct dentry *parent_dentry,
74201+ const struct vfsmount *parent_mnt,
74202+ struct dentry *old_dentry,
74203+ struct inode *old_parent_inode,
74204+ struct vfsmount *old_mnt, const struct filename *newname);
74205+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
74206+ struct dentry *old_dentry,
74207+ struct dentry *new_dentry,
74208+ struct vfsmount *mnt, const __u8 replace);
74209+__u32 gr_check_link(const struct dentry *new_dentry,
74210+ const struct dentry *parent_dentry,
74211+ const struct vfsmount *parent_mnt,
74212+ const struct dentry *old_dentry,
74213+ const struct vfsmount *old_mnt);
74214+int gr_acl_handle_filldir(const struct file *file, const char *name,
74215+ const unsigned int namelen, const ino_t ino);
74216+
74217+__u32 gr_acl_handle_unix(const struct dentry *dentry,
74218+ const struct vfsmount *mnt);
74219+void gr_acl_handle_exit(void);
74220+void gr_acl_handle_psacct(struct task_struct *task, const long code);
74221+int gr_acl_handle_procpidmem(const struct task_struct *task);
74222+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
74223+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
74224+void gr_audit_ptrace(struct task_struct *task);
74225+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
74226+void gr_put_exec_file(struct task_struct *task);
74227+
74228+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
74229+
74230+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
74231+extern void gr_learn_resource(const struct task_struct *task, const int res,
74232+ const unsigned long wanted, const int gt);
74233+#else
74234+static inline void gr_learn_resource(const struct task_struct *task, const int res,
74235+ const unsigned long wanted, const int gt)
74236+{
74237+}
74238+#endif
74239+
74240+#ifdef CONFIG_GRKERNSEC_RESLOG
74241+extern void gr_log_resource(const struct task_struct *task, const int res,
74242+ const unsigned long wanted, const int gt);
74243+#else
74244+static inline void gr_log_resource(const struct task_struct *task, const int res,
74245+ const unsigned long wanted, const int gt)
74246+{
74247+}
74248+#endif
74249+
74250+#ifdef CONFIG_GRKERNSEC
74251+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
74252+void gr_handle_vm86(void);
74253+void gr_handle_mem_readwrite(u64 from, u64 to);
74254+
74255+void gr_log_badprocpid(const char *entry);
74256+
74257+extern int grsec_enable_dmesg;
74258+extern int grsec_disable_privio;
74259+
74260+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74261+extern kgid_t grsec_proc_gid;
74262+#endif
74263+
74264+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74265+extern int grsec_enable_chroot_findtask;
74266+#endif
74267+#ifdef CONFIG_GRKERNSEC_SETXID
74268+extern int grsec_enable_setxid;
74269+#endif
74270+#endif
74271+
74272+#endif
74273diff --git a/include/linux/grsock.h b/include/linux/grsock.h
74274new file mode 100644
74275index 0000000..e7ffaaf
74276--- /dev/null
74277+++ b/include/linux/grsock.h
74278@@ -0,0 +1,19 @@
74279+#ifndef __GRSOCK_H
74280+#define __GRSOCK_H
74281+
74282+extern void gr_attach_curr_ip(const struct sock *sk);
74283+extern int gr_handle_sock_all(const int family, const int type,
74284+ const int protocol);
74285+extern int gr_handle_sock_server(const struct sockaddr *sck);
74286+extern int gr_handle_sock_server_other(const struct sock *sck);
74287+extern int gr_handle_sock_client(const struct sockaddr *sck);
74288+extern int gr_search_connect(struct socket * sock,
74289+ struct sockaddr_in * addr);
74290+extern int gr_search_bind(struct socket * sock,
74291+ struct sockaddr_in * addr);
74292+extern int gr_search_listen(struct socket * sock);
74293+extern int gr_search_accept(struct socket * sock);
74294+extern int gr_search_socket(const int domain, const int type,
74295+ const int protocol);
74296+
74297+#endif
e2b79cd1
AF
74298diff --git a/include/linux/hid.h b/include/linux/hid.h
74299index 0c48991..76e41d8 100644
74300--- a/include/linux/hid.h
74301+++ b/include/linux/hid.h
74302@@ -393,10 +393,12 @@ struct hid_report {
74303 struct hid_device *device; /* associated device */
74304 };
74305
74306+#define HID_MAX_IDS 256
74307+
74308 struct hid_report_enum {
74309 unsigned numbered;
74310 struct list_head report_list;
74311- struct hid_report *report_id_hash[256];
74312+ struct hid_report *report_id_hash[HID_MAX_IDS];
74313 };
74314
74315 #define HID_REPORT_TYPES 3
74316@@ -747,6 +749,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
74317 struct hid_device *hid_allocate_device(void);
74318 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
74319 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
74320+struct hid_report *hid_validate_report(struct hid_device *hid,
74321+ unsigned int type, unsigned int id,
74322+ unsigned int fields,
74323+ unsigned int report_counts);
74324 int hid_open_report(struct hid_device *device);
74325 int hid_check_keys_pressed(struct hid_device *hid);
74326 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
bb5f0bf8
AF
74327diff --git a/include/linux/highmem.h b/include/linux/highmem.h
74328index 7fb31da..08b5114 100644
74329--- a/include/linux/highmem.h
74330+++ b/include/linux/highmem.h
74331@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
74332 kunmap_atomic(kaddr);
74333 }
74334
74335+static inline void sanitize_highpage(struct page *page)
74336+{
74337+ void *kaddr;
74338+ unsigned long flags;
74339+
74340+ local_irq_save(flags);
74341+ kaddr = kmap_atomic(page);
74342+ clear_page(kaddr);
74343+ kunmap_atomic(kaddr);
74344+ local_irq_restore(flags);
74345+}
74346+
74347 static inline void zero_user_segments(struct page *page,
74348 unsigned start1, unsigned end1,
74349 unsigned start2, unsigned end2)
74350diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
74351index 1c7b89a..7f52502 100644
74352--- a/include/linux/hwmon-sysfs.h
74353+++ b/include/linux/hwmon-sysfs.h
74354@@ -25,7 +25,8 @@
74355 struct sensor_device_attribute{
74356 struct device_attribute dev_attr;
74357 int index;
74358-};
74359+} __do_const;
74360+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
74361 #define to_sensor_dev_attr(_dev_attr) \
74362 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
74363
74364@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
74365 struct device_attribute dev_attr;
74366 u8 index;
74367 u8 nr;
74368-};
74369+} __do_const;
74370 #define to_sensor_dev_attr_2(_dev_attr) \
74371 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
74372
74373diff --git a/include/linux/i2c.h b/include/linux/i2c.h
74374index e988fa9..ff9f17e 100644
74375--- a/include/linux/i2c.h
74376+++ b/include/linux/i2c.h
74377@@ -366,6 +366,7 @@ struct i2c_algorithm {
74378 /* To determine what the adapter supports */
74379 u32 (*functionality) (struct i2c_adapter *);
74380 };
74381+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
74382
74383 /**
74384 * struct i2c_bus_recovery_info - I2C bus recovery information
74385diff --git a/include/linux/i2o.h b/include/linux/i2o.h
74386index d23c3c2..eb63c81 100644
74387--- a/include/linux/i2o.h
74388+++ b/include/linux/i2o.h
74389@@ -565,7 +565,7 @@ struct i2o_controller {
74390 struct i2o_device *exec; /* Executive */
74391 #if BITS_PER_LONG == 64
74392 spinlock_t context_list_lock; /* lock for context_list */
74393- atomic_t context_list_counter; /* needed for unique contexts */
74394+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
74395 struct list_head context_list; /* list of context id's
74396 and pointers */
74397 #endif
74398diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
74399index aff7ad8..3942bbd 100644
74400--- a/include/linux/if_pppox.h
74401+++ b/include/linux/if_pppox.h
74402@@ -76,7 +76,7 @@ struct pppox_proto {
74403 int (*ioctl)(struct socket *sock, unsigned int cmd,
74404 unsigned long arg);
74405 struct module *owner;
74406-};
74407+} __do_const;
74408
74409 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
74410 extern void unregister_pppox_proto(int proto_num);
74411diff --git a/include/linux/init.h b/include/linux/init.h
74412index 8618147..0821126 100644
74413--- a/include/linux/init.h
74414+++ b/include/linux/init.h
74415@@ -39,9 +39,36 @@
74416 * Also note, that this data cannot be "const".
74417 */
74418
74419+#ifdef MODULE
74420+#define add_init_latent_entropy
74421+#define add_devinit_latent_entropy
74422+#define add_cpuinit_latent_entropy
74423+#define add_meminit_latent_entropy
74424+#else
74425+#define add_init_latent_entropy __latent_entropy
74426+
74427+#ifdef CONFIG_HOTPLUG
74428+#define add_devinit_latent_entropy
74429+#else
74430+#define add_devinit_latent_entropy __latent_entropy
74431+#endif
74432+
74433+#ifdef CONFIG_HOTPLUG_CPU
74434+#define add_cpuinit_latent_entropy
74435+#else
74436+#define add_cpuinit_latent_entropy __latent_entropy
74437+#endif
74438+
74439+#ifdef CONFIG_MEMORY_HOTPLUG
74440+#define add_meminit_latent_entropy
74441+#else
74442+#define add_meminit_latent_entropy __latent_entropy
74443+#endif
74444+#endif
74445+
74446 /* These are for everybody (although not all archs will actually
74447 discard it in modules) */
74448-#define __init __section(.init.text) __cold notrace
74449+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
74450 #define __initdata __section(.init.data)
74451 #define __initconst __constsection(.init.rodata)
74452 #define __exitdata __section(.exit.data)
74453@@ -94,7 +121,7 @@
74454 #define __exit __section(.exit.text) __exitused __cold notrace
74455
74456 /* Used for HOTPLUG_CPU */
74457-#define __cpuinit __section(.cpuinit.text) __cold notrace
74458+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
74459 #define __cpuinitdata __section(.cpuinit.data)
74460 #define __cpuinitconst __constsection(.cpuinit.rodata)
74461 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
74462@@ -102,7 +129,7 @@
74463 #define __cpuexitconst __constsection(.cpuexit.rodata)
74464
74465 /* Used for MEMORY_HOTPLUG */
74466-#define __meminit __section(.meminit.text) __cold notrace
74467+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
74468 #define __meminitdata __section(.meminit.data)
74469 #define __meminitconst __constsection(.meminit.rodata)
74470 #define __memexit __section(.memexit.text) __exitused __cold notrace
74471diff --git a/include/linux/init_task.h b/include/linux/init_task.h
74472index 5cd0f09..c9f67cc 100644
74473--- a/include/linux/init_task.h
74474+++ b/include/linux/init_task.h
74475@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
74476
74477 #define INIT_TASK_COMM "swapper"
74478
74479+#ifdef CONFIG_X86
74480+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
74481+#else
74482+#define INIT_TASK_THREAD_INFO
74483+#endif
74484+
74485 /*
74486 * INIT_TASK is used to set up the first task table, touch at
74487 * your own risk!. Base=0, limit=0x1fffff (=2MB)
74488@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
74489 RCU_POINTER_INITIALIZER(cred, &init_cred), \
74490 .comm = INIT_TASK_COMM, \
74491 .thread = INIT_THREAD, \
74492+ INIT_TASK_THREAD_INFO \
74493 .fs = &init_fs, \
74494 .files = &init_files, \
74495 .signal = &init_signals, \
74496diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
74497index 5fa5afe..ac55b25 100644
74498--- a/include/linux/interrupt.h
74499+++ b/include/linux/interrupt.h
74500@@ -430,7 +430,7 @@ enum
74501 /* map softirq index to softirq name. update 'softirq_to_name' in
74502 * kernel/softirq.c when adding a new softirq.
74503 */
74504-extern char *softirq_to_name[NR_SOFTIRQS];
74505+extern const char * const softirq_to_name[NR_SOFTIRQS];
74506
74507 /* softirq mask and active fields moved to irq_cpustat_t in
74508 * asm/hardirq.h to get better cache usage. KAO
74509@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
74510
74511 struct softirq_action
74512 {
74513- void (*action)(struct softirq_action *);
74514-};
74515+ void (*action)(void);
74516+} __no_const;
74517
74518 asmlinkage void do_softirq(void);
74519 asmlinkage void __do_softirq(void);
74520-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
74521+extern void open_softirq(int nr, void (*action)(void));
74522 extern void softirq_init(void);
74523 extern void __raise_softirq_irqoff(unsigned int nr);
74524
74525diff --git a/include/linux/iommu.h b/include/linux/iommu.h
74526index 3aeb730..2177f39 100644
74527--- a/include/linux/iommu.h
74528+++ b/include/linux/iommu.h
74529@@ -113,7 +113,7 @@ struct iommu_ops {
74530 u32 (*domain_get_windows)(struct iommu_domain *domain);
74531
74532 unsigned long pgsize_bitmap;
74533-};
74534+} __do_const;
74535
74536 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
74537 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
74538diff --git a/include/linux/ioport.h b/include/linux/ioport.h
74539index 89b7c24..382af74 100644
74540--- a/include/linux/ioport.h
74541+++ b/include/linux/ioport.h
74542@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
74543 int adjust_resource(struct resource *res, resource_size_t start,
74544 resource_size_t size);
74545 resource_size_t resource_alignment(struct resource *res);
74546-static inline resource_size_t resource_size(const struct resource *res)
74547+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
74548 {
74549 return res->end - res->start + 1;
74550 }
74551diff --git a/include/linux/irq.h b/include/linux/irq.h
74552index bc4e066..50468a9 100644
74553--- a/include/linux/irq.h
74554+++ b/include/linux/irq.h
74555@@ -328,7 +328,8 @@ struct irq_chip {
74556 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
74557
74558 unsigned long flags;
74559-};
74560+} __do_const;
74561+typedef struct irq_chip __no_const irq_chip_no_const;
74562
74563 /*
74564 * irq_chip specific flags
74565diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
74566index 3e203eb..3fe68d0 100644
74567--- a/include/linux/irqchip/arm-gic.h
74568+++ b/include/linux/irqchip/arm-gic.h
74569@@ -59,9 +59,11 @@
74570
74571 #ifndef __ASSEMBLY__
74572
74573+#include <linux/irq.h>
74574+
74575 struct device_node;
74576
74577-extern struct irq_chip gic_arch_extn;
74578+extern irq_chip_no_const gic_arch_extn;
74579
74580 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
74581 u32 offset, struct device_node *);
74582diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
74583index 6883e19..e854fcb 100644
74584--- a/include/linux/kallsyms.h
74585+++ b/include/linux/kallsyms.h
74586@@ -15,7 +15,8 @@
74587
74588 struct module;
74589
74590-#ifdef CONFIG_KALLSYMS
74591+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
74592+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74593 /* Lookup the address for a symbol. Returns 0 if not found. */
74594 unsigned long kallsyms_lookup_name(const char *name);
74595
74596@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
74597 /* Stupid that this does nothing, but I didn't create this mess. */
74598 #define __print_symbol(fmt, addr)
74599 #endif /*CONFIG_KALLSYMS*/
74600+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
74601+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
74602+extern unsigned long kallsyms_lookup_name(const char *name);
74603+extern void __print_symbol(const char *fmt, unsigned long address);
74604+extern int sprint_backtrace(char *buffer, unsigned long address);
74605+extern int sprint_symbol(char *buffer, unsigned long address);
74606+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
74607+const char *kallsyms_lookup(unsigned long addr,
74608+ unsigned long *symbolsize,
74609+ unsigned long *offset,
74610+ char **modname, char *namebuf);
74611+extern int kallsyms_lookup_size_offset(unsigned long addr,
74612+ unsigned long *symbolsize,
74613+ unsigned long *offset);
74614+#endif
74615
74616 /* This macro allows us to keep printk typechecking */
74617 static __printf(1, 2)
74618diff --git a/include/linux/key-type.h b/include/linux/key-type.h
74619index 518a53a..5e28358 100644
74620--- a/include/linux/key-type.h
74621+++ b/include/linux/key-type.h
74622@@ -125,7 +125,7 @@ struct key_type {
74623 /* internal fields */
74624 struct list_head link; /* link in types list */
74625 struct lock_class_key lock_class; /* key->sem lock class */
74626-};
74627+} __do_const;
74628
74629 extern struct key_type key_type_keyring;
74630
74631diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
74632index c6e091b..a940adf 100644
74633--- a/include/linux/kgdb.h
74634+++ b/include/linux/kgdb.h
74635@@ -52,7 +52,7 @@ extern int kgdb_connected;
74636 extern int kgdb_io_module_registered;
74637
74638 extern atomic_t kgdb_setting_breakpoint;
74639-extern atomic_t kgdb_cpu_doing_single_step;
74640+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
74641
74642 extern struct task_struct *kgdb_usethread;
74643 extern struct task_struct *kgdb_contthread;
74644@@ -254,7 +254,7 @@ struct kgdb_arch {
74645 void (*correct_hw_break)(void);
74646
74647 void (*enable_nmi)(bool on);
74648-};
74649+} __do_const;
74650
74651 /**
74652 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
74653@@ -279,7 +279,7 @@ struct kgdb_io {
74654 void (*pre_exception) (void);
74655 void (*post_exception) (void);
74656 int is_console;
74657-};
74658+} __do_const;
74659
74660 extern struct kgdb_arch arch_kgdb_ops;
74661
74662diff --git a/include/linux/kmod.h b/include/linux/kmod.h
74663index 0555cc6..b16a7a4 100644
74664--- a/include/linux/kmod.h
74665+++ b/include/linux/kmod.h
74666@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
74667 * usually useless though. */
74668 extern __printf(2, 3)
74669 int __request_module(bool wait, const char *name, ...);
74670+extern __printf(3, 4)
74671+int ___request_module(bool wait, char *param_name, const char *name, ...);
74672 #define request_module(mod...) __request_module(true, mod)
74673 #define request_module_nowait(mod...) __request_module(false, mod)
74674 #define try_then_request_module(x, mod...) \
74675diff --git a/include/linux/kobject.h b/include/linux/kobject.h
74676index 939b112..ed6ed51 100644
74677--- a/include/linux/kobject.h
74678+++ b/include/linux/kobject.h
74679@@ -111,7 +111,7 @@ struct kobj_type {
74680 struct attribute **default_attrs;
74681 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
74682 const void *(*namespace)(struct kobject *kobj);
74683-};
74684+} __do_const;
74685
74686 struct kobj_uevent_env {
74687 char *envp[UEVENT_NUM_ENVP];
74688@@ -134,6 +134,7 @@ struct kobj_attribute {
74689 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
74690 const char *buf, size_t count);
74691 };
74692+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
74693
74694 extern const struct sysfs_ops kobj_sysfs_ops;
74695
74696diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
74697index f66b065..c2c29b4 100644
74698--- a/include/linux/kobject_ns.h
74699+++ b/include/linux/kobject_ns.h
74700@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
74701 const void *(*netlink_ns)(struct sock *sk);
74702 const void *(*initial_ns)(void);
74703 void (*drop_ns)(void *);
74704-};
74705+} __do_const;
74706
74707 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
74708 int kobj_ns_type_registered(enum kobj_ns_type type);
74709diff --git a/include/linux/kref.h b/include/linux/kref.h
74710index 484604d..0f6c5b6 100644
74711--- a/include/linux/kref.h
74712+++ b/include/linux/kref.h
74713@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
74714 static inline int kref_sub(struct kref *kref, unsigned int count,
74715 void (*release)(struct kref *kref))
74716 {
74717- WARN_ON(release == NULL);
74718+ BUG_ON(release == NULL);
74719
74720 if (atomic_sub_and_test((int) count, &kref->refcount)) {
74721 release(kref);
74722diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
74723index 8db53cf..c21121d 100644
74724--- a/include/linux/kvm_host.h
74725+++ b/include/linux/kvm_host.h
74726@@ -444,7 +444,7 @@ static inline void kvm_irqfd_exit(void)
74727 {
74728 }
74729 #endif
74730-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
74731+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
74732 struct module *module);
74733 void kvm_exit(void);
74734
74735@@ -616,7 +616,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
74736 struct kvm_guest_debug *dbg);
74737 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
74738
74739-int kvm_arch_init(void *opaque);
74740+int kvm_arch_init(const void *opaque);
74741 void kvm_arch_exit(void);
74742
74743 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
74744diff --git a/include/linux/libata.h b/include/linux/libata.h
74745index eae7a05..2cdd875 100644
74746--- a/include/linux/libata.h
74747+++ b/include/linux/libata.h
74748@@ -919,7 +919,7 @@ struct ata_port_operations {
74749 * fields must be pointers.
74750 */
74751 const struct ata_port_operations *inherits;
74752-};
74753+} __do_const;
74754
74755 struct ata_port_info {
74756 unsigned long flags;
74757diff --git a/include/linux/list.h b/include/linux/list.h
74758index b83e565..baa6c1d 100644
74759--- a/include/linux/list.h
74760+++ b/include/linux/list.h
74761@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
74762 extern void list_del(struct list_head *entry);
74763 #endif
74764
74765+extern void __pax_list_add(struct list_head *new,
74766+ struct list_head *prev,
74767+ struct list_head *next);
74768+static inline void pax_list_add(struct list_head *new, struct list_head *head)
74769+{
74770+ __pax_list_add(new, head, head->next);
74771+}
74772+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
74773+{
74774+ __pax_list_add(new, head->prev, head);
74775+}
74776+extern void pax_list_del(struct list_head *entry);
74777+
74778 /**
74779 * list_replace - replace old entry by new one
74780 * @old : the element to be replaced
74781@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
74782 INIT_LIST_HEAD(entry);
74783 }
74784
74785+extern void pax_list_del_init(struct list_head *entry);
74786+
74787 /**
74788 * list_move - delete from one list and add as another's head
74789 * @list: the entry to move
74790diff --git a/include/linux/math64.h b/include/linux/math64.h
74791index 2913b86..8dcbb1e 100644
74792--- a/include/linux/math64.h
74793+++ b/include/linux/math64.h
74794@@ -15,7 +15,7 @@
74795 * This is commonly provided by 32bit archs to provide an optimized 64bit
74796 * divide.
74797 */
74798-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74799+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74800 {
74801 *remainder = dividend % divisor;
74802 return dividend / divisor;
74803@@ -33,7 +33,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
74804 /**
74805 * div64_u64 - unsigned 64bit divide with 64bit divisor
74806 */
74807-static inline u64 div64_u64(u64 dividend, u64 divisor)
74808+static inline u64 __intentional_overflow(0) div64_u64(u64 dividend, u64 divisor)
74809 {
74810 return dividend / divisor;
74811 }
74812@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
74813 #define div64_ul(x, y) div_u64((x), (y))
74814
74815 #ifndef div_u64_rem
74816-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74817+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74818 {
74819 *remainder = do_div(dividend, divisor);
74820 return dividend;
74821@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
74822 * divide.
74823 */
74824 #ifndef div_u64
74825-static inline u64 div_u64(u64 dividend, u32 divisor)
74826+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
74827 {
74828 u32 remainder;
74829 return div_u64_rem(dividend, divisor, &remainder);
74830diff --git a/include/linux/mm.h b/include/linux/mm.h
74831index e0c8528..bcf0c29 100644
74832--- a/include/linux/mm.h
74833+++ b/include/linux/mm.h
74834@@ -104,6 +104,11 @@ extern unsigned int kobjsize(const void *objp);
74835 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
74836 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
74837 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
74838+
74839+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
74840+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
74841+#endif
74842+
74843 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
74844
74845 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
74846@@ -205,8 +210,8 @@ struct vm_operations_struct {
74847 /* called by access_process_vm when get_user_pages() fails, typically
74848 * for use by special VMAs that can switch between memory and hardware
74849 */
74850- int (*access)(struct vm_area_struct *vma, unsigned long addr,
74851- void *buf, int len, int write);
74852+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
74853+ void *buf, size_t len, int write);
74854 #ifdef CONFIG_NUMA
74855 /*
74856 * set_policy() op must add a reference to any non-NULL @new mempolicy
74857@@ -236,6 +241,7 @@ struct vm_operations_struct {
74858 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
74859 unsigned long size, pgoff_t pgoff);
74860 };
74861+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
74862
74863 struct mmu_gather;
74864 struct inode;
74865@@ -980,8 +986,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
74866 unsigned long *pfn);
74867 int follow_phys(struct vm_area_struct *vma, unsigned long address,
74868 unsigned int flags, unsigned long *prot, resource_size_t *phys);
74869-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
74870- void *buf, int len, int write);
74871+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
74872+ void *buf, size_t len, int write);
74873
74874 static inline void unmap_shared_mapping_range(struct address_space *mapping,
74875 loff_t const holebegin, loff_t const holelen)
74876@@ -1020,9 +1026,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
74877 }
74878 #endif
74879
74880-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
74881-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
74882- void *buf, int len, int write);
74883+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
74884+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
74885+ void *buf, size_t len, int write);
74886
74887 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74888 unsigned long start, unsigned long nr_pages,
74889@@ -1053,34 +1059,6 @@ int set_page_dirty(struct page *page);
74890 int set_page_dirty_lock(struct page *page);
74891 int clear_page_dirty_for_io(struct page *page);
74892
74893-/* Is the vma a continuation of the stack vma above it? */
74894-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
74895-{
74896- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
74897-}
74898-
74899-static inline int stack_guard_page_start(struct vm_area_struct *vma,
74900- unsigned long addr)
74901-{
74902- return (vma->vm_flags & VM_GROWSDOWN) &&
74903- (vma->vm_start == addr) &&
74904- !vma_growsdown(vma->vm_prev, addr);
74905-}
74906-
74907-/* Is the vma a continuation of the stack vma below it? */
74908-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
74909-{
74910- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
74911-}
74912-
74913-static inline int stack_guard_page_end(struct vm_area_struct *vma,
74914- unsigned long addr)
74915-{
74916- return (vma->vm_flags & VM_GROWSUP) &&
74917- (vma->vm_end == addr) &&
74918- !vma_growsup(vma->vm_next, addr);
74919-}
74920-
74921 extern pid_t
74922 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
74923
74924@@ -1180,6 +1158,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
74925 }
74926 #endif
74927
74928+#ifdef CONFIG_MMU
74929+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
74930+#else
74931+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
74932+{
74933+ return __pgprot(0);
74934+}
74935+#endif
74936+
74937 int vma_wants_writenotify(struct vm_area_struct *vma);
74938
74939 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
74940@@ -1198,8 +1185,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
74941 {
74942 return 0;
74943 }
74944+
74945+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
74946+ unsigned long address)
74947+{
74948+ return 0;
74949+}
74950 #else
74951 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
74952+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
74953 #endif
74954
74955 #ifdef __PAGETABLE_PMD_FOLDED
74956@@ -1208,8 +1202,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
74957 {
74958 return 0;
74959 }
74960+
74961+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
74962+ unsigned long address)
74963+{
74964+ return 0;
74965+}
74966 #else
74967 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
74968+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
74969 #endif
74970
74971 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
74972@@ -1227,11 +1228,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
74973 NULL: pud_offset(pgd, address);
74974 }
74975
74976+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74977+{
74978+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
74979+ NULL: pud_offset(pgd, address);
74980+}
74981+
74982 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
74983 {
74984 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
74985 NULL: pmd_offset(pud, address);
74986 }
74987+
74988+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
74989+{
74990+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
74991+ NULL: pmd_offset(pud, address);
74992+}
74993 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
74994
74995 #if USE_SPLIT_PTLOCKS
74996@@ -1517,6 +1530,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
74997 unsigned long len, unsigned long prot, unsigned long flags,
74998 unsigned long pgoff, unsigned long *populate);
74999 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
75000+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
75001
75002 #ifdef CONFIG_MMU
75003 extern int __mm_populate(unsigned long addr, unsigned long len,
75004@@ -1545,10 +1559,11 @@ struct vm_unmapped_area_info {
75005 unsigned long high_limit;
75006 unsigned long align_mask;
75007 unsigned long align_offset;
75008+ unsigned long threadstack_offset;
75009 };
75010
75011-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
75012-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
75013+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
75014+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
75015
75016 /*
75017 * Search for an unmapped address range.
75018@@ -1560,7 +1575,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
75019 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
75020 */
75021 static inline unsigned long
75022-vm_unmapped_area(struct vm_unmapped_area_info *info)
75023+vm_unmapped_area(const struct vm_unmapped_area_info *info)
75024 {
75025 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
75026 return unmapped_area(info);
75027@@ -1623,6 +1638,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
75028 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
75029 struct vm_area_struct **pprev);
75030
75031+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
75032+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
75033+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
75034+
75035 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
75036 NULL if none. Assume start_addr < end_addr. */
75037 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
75038@@ -1651,15 +1670,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
75039 return vma;
75040 }
75041
75042-#ifdef CONFIG_MMU
75043-pgprot_t vm_get_page_prot(unsigned long vm_flags);
75044-#else
75045-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
75046-{
75047- return __pgprot(0);
75048-}
75049-#endif
75050-
75051 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
75052 unsigned long change_prot_numa(struct vm_area_struct *vma,
75053 unsigned long start, unsigned long end);
75054@@ -1711,6 +1721,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
75055 static inline void vm_stat_account(struct mm_struct *mm,
75056 unsigned long flags, struct file *file, long pages)
75057 {
75058+
75059+#ifdef CONFIG_PAX_RANDMMAP
75060+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
75061+#endif
75062+
75063 mm->total_vm += pages;
75064 }
75065 #endif /* CONFIG_PROC_FS */
75066@@ -1791,7 +1806,7 @@ extern int unpoison_memory(unsigned long pfn);
75067 extern int sysctl_memory_failure_early_kill;
75068 extern int sysctl_memory_failure_recovery;
75069 extern void shake_page(struct page *p, int access);
75070-extern atomic_long_t num_poisoned_pages;
75071+extern atomic_long_unchecked_t num_poisoned_pages;
75072 extern int soft_offline_page(struct page *page, int flags);
75073
75074 extern void dump_page(struct page *page);
75075@@ -1828,5 +1843,11 @@ void __init setup_nr_node_ids(void);
75076 static inline void setup_nr_node_ids(void) {}
75077 #endif
75078
75079+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
75080+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
75081+#else
75082+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
75083+#endif
75084+
75085 #endif /* __KERNEL__ */
75086 #endif /* _LINUX_MM_H */
75087diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
e2b79cd1 75088index 4a189ba..04101d6 100644
bb5f0bf8
AF
75089--- a/include/linux/mm_types.h
75090+++ b/include/linux/mm_types.h
75091@@ -289,6 +289,8 @@ struct vm_area_struct {
75092 #ifdef CONFIG_NUMA
75093 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
75094 #endif
75095+
75096+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
75097 };
75098
75099 struct core_thread {
e2b79cd1 75100@@ -438,6 +440,24 @@ struct mm_struct {
bb5f0bf8
AF
75101 int first_nid;
75102 #endif
75103 struct uprobes_state uprobes_state;
75104+
75105+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
75106+ unsigned long pax_flags;
75107+#endif
75108+
75109+#ifdef CONFIG_PAX_DLRESOLVE
75110+ unsigned long call_dl_resolve;
75111+#endif
75112+
75113+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
75114+ unsigned long call_syscall;
75115+#endif
75116+
75117+#ifdef CONFIG_PAX_ASLR
75118+ unsigned long delta_mmap; /* randomized offset */
75119+ unsigned long delta_stack; /* randomized offset */
75120+#endif
75121+
75122 };
75123
75124 /* first nid will either be a valid NID or one of these values */
75125diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
75126index c5d5278..f0b68c8 100644
75127--- a/include/linux/mmiotrace.h
75128+++ b/include/linux/mmiotrace.h
75129@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
75130 /* Called from ioremap.c */
75131 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
75132 void __iomem *addr);
75133-extern void mmiotrace_iounmap(volatile void __iomem *addr);
75134+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
75135
75136 /* For anyone to insert markers. Remember trailing newline. */
75137 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
75138@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
75139 {
75140 }
75141
75142-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
75143+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
75144 {
75145 }
75146
75147diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
75148index 5c76737..61f518e 100644
75149--- a/include/linux/mmzone.h
75150+++ b/include/linux/mmzone.h
75151@@ -396,7 +396,7 @@ struct zone {
75152 unsigned long flags; /* zone flags, see below */
75153
75154 /* Zone statistics */
75155- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75156+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75157
75158 /*
75159 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
75160diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
75161index b508016..237cfe5 100644
75162--- a/include/linux/mod_devicetable.h
75163+++ b/include/linux/mod_devicetable.h
75164@@ -13,7 +13,7 @@
75165 typedef unsigned long kernel_ulong_t;
75166 #endif
75167
75168-#define PCI_ANY_ID (~0)
75169+#define PCI_ANY_ID ((__u16)~0)
75170
75171 struct pci_device_id {
75172 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
75173@@ -139,7 +139,7 @@ struct usb_device_id {
75174 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
75175 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
75176
75177-#define HID_ANY_ID (~0)
75178+#define HID_ANY_ID (~0U)
75179 #define HID_BUS_ANY 0xffff
75180 #define HID_GROUP_ANY 0x0000
75181
75182@@ -465,7 +465,7 @@ struct dmi_system_id {
75183 const char *ident;
75184 struct dmi_strmatch matches[4];
75185 void *driver_data;
75186-};
75187+} __do_const;
75188 /*
75189 * struct dmi_device_id appears during expansion of
75190 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
75191diff --git a/include/linux/module.h b/include/linux/module.h
75192index 46f1ea0..a34ca37 100644
75193--- a/include/linux/module.h
75194+++ b/include/linux/module.h
75195@@ -17,9 +17,11 @@
75196 #include <linux/moduleparam.h>
75197 #include <linux/tracepoint.h>
75198 #include <linux/export.h>
75199+#include <linux/fs.h>
75200
75201 #include <linux/percpu.h>
75202 #include <asm/module.h>
75203+#include <asm/pgtable.h>
75204
75205 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
75206 #define MODULE_SIG_STRING "~Module signature appended~\n"
75207@@ -54,12 +56,13 @@ struct module_attribute {
75208 int (*test)(struct module *);
75209 void (*free)(struct module *);
75210 };
75211+typedef struct module_attribute __no_const module_attribute_no_const;
75212
75213 struct module_version_attribute {
75214 struct module_attribute mattr;
75215 const char *module_name;
75216 const char *version;
75217-} __attribute__ ((__aligned__(sizeof(void *))));
75218+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
75219
75220 extern ssize_t __modver_version_show(struct module_attribute *,
75221 struct module_kobject *, char *);
75222@@ -232,7 +235,7 @@ struct module
75223
75224 /* Sysfs stuff. */
75225 struct module_kobject mkobj;
75226- struct module_attribute *modinfo_attrs;
75227+ module_attribute_no_const *modinfo_attrs;
75228 const char *version;
75229 const char *srcversion;
75230 struct kobject *holders_dir;
75231@@ -281,19 +284,16 @@ struct module
75232 int (*init)(void);
75233
75234 /* If this is non-NULL, vfree after init() returns */
75235- void *module_init;
75236+ void *module_init_rx, *module_init_rw;
75237
75238 /* Here is the actual code + data, vfree'd on unload. */
75239- void *module_core;
75240+ void *module_core_rx, *module_core_rw;
75241
75242 /* Here are the sizes of the init and core sections */
75243- unsigned int init_size, core_size;
75244+ unsigned int init_size_rw, core_size_rw;
75245
75246 /* The size of the executable code in each section. */
75247- unsigned int init_text_size, core_text_size;
75248-
75249- /* Size of RO sections of the module (text+rodata) */
75250- unsigned int init_ro_size, core_ro_size;
75251+ unsigned int init_size_rx, core_size_rx;
75252
75253 /* Arch-specific module values */
75254 struct mod_arch_specific arch;
75255@@ -349,6 +349,10 @@ struct module
75256 #ifdef CONFIG_EVENT_TRACING
75257 struct ftrace_event_call **trace_events;
75258 unsigned int num_trace_events;
75259+ struct file_operations trace_id;
75260+ struct file_operations trace_enable;
75261+ struct file_operations trace_format;
75262+ struct file_operations trace_filter;
75263 #endif
75264 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
75265 unsigned int num_ftrace_callsites;
75266@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
75267 bool is_module_percpu_address(unsigned long addr);
75268 bool is_module_text_address(unsigned long addr);
75269
75270+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
75271+{
75272+
75273+#ifdef CONFIG_PAX_KERNEXEC
75274+ if (ktla_ktva(addr) >= (unsigned long)start &&
75275+ ktla_ktva(addr) < (unsigned long)start + size)
75276+ return 1;
75277+#endif
75278+
75279+ return ((void *)addr >= start && (void *)addr < start + size);
75280+}
75281+
75282+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
75283+{
75284+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
75285+}
75286+
75287+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
75288+{
75289+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
75290+}
75291+
75292+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
75293+{
75294+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
75295+}
75296+
75297+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
75298+{
75299+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
75300+}
75301+
75302 static inline int within_module_core(unsigned long addr, const struct module *mod)
75303 {
75304- return (unsigned long)mod->module_core <= addr &&
75305- addr < (unsigned long)mod->module_core + mod->core_size;
75306+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
75307 }
75308
75309 static inline int within_module_init(unsigned long addr, const struct module *mod)
75310 {
75311- return (unsigned long)mod->module_init <= addr &&
75312- addr < (unsigned long)mod->module_init + mod->init_size;
75313+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
75314 }
75315
75316 /* Search for module by name: must hold module_mutex. */
75317diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
75318index 560ca53..ef621ef 100644
75319--- a/include/linux/moduleloader.h
75320+++ b/include/linux/moduleloader.h
75321@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
75322 sections. Returns NULL on failure. */
75323 void *module_alloc(unsigned long size);
75324
75325+#ifdef CONFIG_PAX_KERNEXEC
75326+void *module_alloc_exec(unsigned long size);
75327+#else
75328+#define module_alloc_exec(x) module_alloc(x)
75329+#endif
75330+
75331 /* Free memory returned from module_alloc. */
75332 void module_free(struct module *mod, void *module_region);
75333
75334+#ifdef CONFIG_PAX_KERNEXEC
75335+void module_free_exec(struct module *mod, void *module_region);
75336+#else
75337+#define module_free_exec(x, y) module_free((x), (y))
75338+#endif
75339+
75340 /*
75341 * Apply the given relocation to the (simplified) ELF. Return -error
75342 * or 0.
75343@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
75344 unsigned int relsec,
75345 struct module *me)
75346 {
75347+#ifdef CONFIG_MODULES
75348 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
75349+#endif
75350 return -ENOEXEC;
75351 }
75352 #endif
75353@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
75354 unsigned int relsec,
75355 struct module *me)
75356 {
75357+#ifdef CONFIG_MODULES
75358 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
75359+#endif
75360 return -ENOEXEC;
75361 }
75362 #endif
75363diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
75364index 137b419..fe663ec 100644
75365--- a/include/linux/moduleparam.h
75366+++ b/include/linux/moduleparam.h
75367@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
75368 * @len is usually just sizeof(string).
75369 */
75370 #define module_param_string(name, string, len, perm) \
75371- static const struct kparam_string __param_string_##name \
75372+ static const struct kparam_string __param_string_##name __used \
75373 = { len, string }; \
75374 __module_param_call(MODULE_PARAM_PREFIX, name, \
75375 &param_ops_string, \
75376@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
75377 */
75378 #define module_param_array_named(name, array, type, nump, perm) \
75379 param_check_##type(name, &(array)[0]); \
75380- static const struct kparam_array __param_arr_##name \
75381+ static const struct kparam_array __param_arr_##name __used \
75382 = { .max = ARRAY_SIZE(array), .num = nump, \
75383 .ops = &param_ops_##type, \
75384 .elemsize = sizeof(array[0]), .elem = array }; \
75385diff --git a/include/linux/namei.h b/include/linux/namei.h
75386index 5a5ff57..5ae5070 100644
75387--- a/include/linux/namei.h
75388+++ b/include/linux/namei.h
75389@@ -19,7 +19,7 @@ struct nameidata {
75390 unsigned seq;
75391 int last_type;
75392 unsigned depth;
75393- char *saved_names[MAX_NESTED_LINKS + 1];
75394+ const char *saved_names[MAX_NESTED_LINKS + 1];
75395 };
75396
75397 /*
75398@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
75399
75400 extern void nd_jump_link(struct nameidata *nd, struct path *path);
75401
75402-static inline void nd_set_link(struct nameidata *nd, char *path)
75403+static inline void nd_set_link(struct nameidata *nd, const char *path)
75404 {
75405 nd->saved_names[nd->depth] = path;
75406 }
75407
75408-static inline char *nd_get_link(struct nameidata *nd)
75409+static inline const char *nd_get_link(const struct nameidata *nd)
75410 {
75411 return nd->saved_names[nd->depth];
75412 }
75413diff --git a/include/linux/net.h b/include/linux/net.h
75414index 99c9f0c..e1cf296 100644
75415--- a/include/linux/net.h
75416+++ b/include/linux/net.h
75417@@ -183,7 +183,7 @@ struct net_proto_family {
75418 int (*create)(struct net *net, struct socket *sock,
75419 int protocol, int kern);
75420 struct module *owner;
75421-};
75422+} __do_const;
75423
75424 struct iovec;
75425 struct kvec;
75426diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
75427index 96e4c21..9cc8278 100644
75428--- a/include/linux/netdevice.h
75429+++ b/include/linux/netdevice.h
75430@@ -1026,6 +1026,7 @@ struct net_device_ops {
75431 int (*ndo_change_carrier)(struct net_device *dev,
75432 bool new_carrier);
75433 };
75434+typedef struct net_device_ops __no_const net_device_ops_no_const;
75435
75436 /*
75437 * The DEVICE structure.
75438@@ -1094,7 +1095,7 @@ struct net_device {
75439 int iflink;
75440
75441 struct net_device_stats stats;
75442- atomic_long_t rx_dropped; /* dropped packets by core network
75443+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
75444 * Do not use this in drivers.
75445 */
75446
75447diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
75448index 0060fde..481c6ae 100644
75449--- a/include/linux/netfilter.h
75450+++ b/include/linux/netfilter.h
75451@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
75452 #endif
75453 /* Use the module struct to lock set/get code in place */
75454 struct module *owner;
75455-};
75456+} __do_const;
75457
75458 /* Function to register/unregister hook points. */
75459 int nf_register_hook(struct nf_hook_ops *reg);
75460diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
75461index d80e275..c3510b8 100644
75462--- a/include/linux/netfilter/ipset/ip_set.h
75463+++ b/include/linux/netfilter/ipset/ip_set.h
75464@@ -124,7 +124,7 @@ struct ip_set_type_variant {
75465 /* Return true if "b" set is the same as "a"
75466 * according to the create set parameters */
75467 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
75468-};
75469+} __do_const;
75470
75471 /* The core set type structure */
75472 struct ip_set_type {
75473diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
75474index cadb740..d7c37c0 100644
75475--- a/include/linux/netfilter/nfnetlink.h
75476+++ b/include/linux/netfilter/nfnetlink.h
75477@@ -16,7 +16,7 @@ struct nfnl_callback {
75478 const struct nlattr * const cda[]);
75479 const struct nla_policy *policy; /* netlink attribute policy */
75480 const u_int16_t attr_count; /* number of nlattr's */
75481-};
75482+} __do_const;
75483
75484 struct nfnetlink_subsystem {
75485 const char *name;
75486diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
75487new file mode 100644
75488index 0000000..33f4af8
75489--- /dev/null
75490+++ b/include/linux/netfilter/xt_gradm.h
75491@@ -0,0 +1,9 @@
75492+#ifndef _LINUX_NETFILTER_XT_GRADM_H
75493+#define _LINUX_NETFILTER_XT_GRADM_H 1
75494+
75495+struct xt_gradm_mtinfo {
75496+ __u16 flags;
75497+ __u16 invflags;
75498+};
75499+
75500+#endif
75501diff --git a/include/linux/nls.h b/include/linux/nls.h
75502index 5dc635f..35f5e11 100644
75503--- a/include/linux/nls.h
75504+++ b/include/linux/nls.h
75505@@ -31,7 +31,7 @@ struct nls_table {
75506 const unsigned char *charset2upper;
75507 struct module *owner;
75508 struct nls_table *next;
75509-};
75510+} __do_const;
75511
75512 /* this value hold the maximum octet of charset */
75513 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
75514diff --git a/include/linux/notifier.h b/include/linux/notifier.h
75515index d14a4c3..a078786 100644
75516--- a/include/linux/notifier.h
75517+++ b/include/linux/notifier.h
75518@@ -54,7 +54,8 @@ struct notifier_block {
75519 notifier_fn_t notifier_call;
75520 struct notifier_block __rcu *next;
75521 int priority;
75522-};
75523+} __do_const;
75524+typedef struct notifier_block __no_const notifier_block_no_const;
75525
75526 struct atomic_notifier_head {
75527 spinlock_t lock;
75528diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
75529index a4c5624..79d6d88 100644
75530--- a/include/linux/oprofile.h
75531+++ b/include/linux/oprofile.h
75532@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
75533 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
75534 char const * name, ulong * val);
75535
75536-/** Create a file for read-only access to an atomic_t. */
75537+/** Create a file for read-only access to an atomic_unchecked_t. */
75538 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
75539- char const * name, atomic_t * val);
75540+ char const * name, atomic_unchecked_t * val);
75541
75542 /** create a directory */
75543 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
75544diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
75545index 8db71dc..a76bf2c 100644
75546--- a/include/linux/pci_hotplug.h
75547+++ b/include/linux/pci_hotplug.h
75548@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
75549 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
75550 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
75551 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
75552-};
75553+} __do_const;
75554+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
75555
75556 /**
75557 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
75558diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
75559index c5b6dbf..b124155 100644
75560--- a/include/linux/perf_event.h
75561+++ b/include/linux/perf_event.h
75562@@ -318,8 +318,8 @@ struct perf_event {
75563
75564 enum perf_event_active_state state;
75565 unsigned int attach_state;
75566- local64_t count;
75567- atomic64_t child_count;
75568+ local64_t count; /* PaX: fix it one day */
75569+ atomic64_unchecked_t child_count;
75570
75571 /*
75572 * These are the total time in nanoseconds that the event
75573@@ -370,8 +370,8 @@ struct perf_event {
75574 * These accumulate total time (in nanoseconds) that children
75575 * events have been enabled and running, respectively.
75576 */
75577- atomic64_t child_total_time_enabled;
75578- atomic64_t child_total_time_running;
75579+ atomic64_unchecked_t child_total_time_enabled;
75580+ atomic64_unchecked_t child_total_time_running;
75581
75582 /*
75583 * Protect attach/detach and child_list:
75584@@ -692,7 +692,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
75585 entry->ip[entry->nr++] = ip;
75586 }
75587
75588-extern int sysctl_perf_event_paranoid;
75589+extern int sysctl_perf_event_legitimately_concerned;
75590 extern int sysctl_perf_event_mlock;
75591 extern int sysctl_perf_event_sample_rate;
75592
75593@@ -700,19 +700,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
75594 void __user *buffer, size_t *lenp,
75595 loff_t *ppos);
75596
75597+static inline bool perf_paranoid_any(void)
75598+{
75599+ return sysctl_perf_event_legitimately_concerned > 2;
75600+}
75601+
75602 static inline bool perf_paranoid_tracepoint_raw(void)
75603 {
75604- return sysctl_perf_event_paranoid > -1;
75605+ return sysctl_perf_event_legitimately_concerned > -1;
75606 }
75607
75608 static inline bool perf_paranoid_cpu(void)
75609 {
75610- return sysctl_perf_event_paranoid > 0;
75611+ return sysctl_perf_event_legitimately_concerned > 0;
75612 }
75613
75614 static inline bool perf_paranoid_kernel(void)
75615 {
75616- return sysctl_perf_event_paranoid > 1;
75617+ return sysctl_perf_event_legitimately_concerned > 1;
75618 }
75619
75620 extern void perf_event_init(void);
75621@@ -806,7 +811,7 @@ static inline void perf_restore_debug_store(void) { }
75622 */
75623 #define perf_cpu_notifier(fn) \
75624 do { \
75625- static struct notifier_block fn##_nb __cpuinitdata = \
75626+ static struct notifier_block fn##_nb = \
75627 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
75628 unsigned long cpu = smp_processor_id(); \
75629 unsigned long flags; \
75630@@ -826,7 +831,7 @@ struct perf_pmu_events_attr {
75631 struct device_attribute attr;
75632 u64 id;
75633 const char *event_str;
75634-};
75635+} __do_const;
75636
75637 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
75638 static struct perf_pmu_events_attr _var = { \
75639diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
75640index b8809fe..ae4ccd0 100644
75641--- a/include/linux/pipe_fs_i.h
75642+++ b/include/linux/pipe_fs_i.h
75643@@ -47,10 +47,10 @@ struct pipe_inode_info {
75644 struct mutex mutex;
75645 wait_queue_head_t wait;
75646 unsigned int nrbufs, curbuf, buffers;
75647- unsigned int readers;
75648- unsigned int writers;
75649- unsigned int files;
75650- unsigned int waiting_writers;
75651+ atomic_t readers;
75652+ atomic_t writers;
75653+ atomic_t files;
75654+ atomic_t waiting_writers;
75655 unsigned int r_counter;
75656 unsigned int w_counter;
75657 struct page *tmp_page;
75658diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
75659index 5f28cae..3d23723 100644
75660--- a/include/linux/platform_data/usb-ehci-s5p.h
75661+++ b/include/linux/platform_data/usb-ehci-s5p.h
75662@@ -14,7 +14,7 @@
75663 struct s5p_ehci_platdata {
75664 int (*phy_init)(struct platform_device *pdev, int type);
75665 int (*phy_exit)(struct platform_device *pdev, int type);
75666-};
75667+} __no_const;
75668
75669 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
75670
75671diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
75672index c256c59..8ea94c7 100644
75673--- a/include/linux/platform_data/usb-ohci-exynos.h
75674+++ b/include/linux/platform_data/usb-ohci-exynos.h
75675@@ -14,7 +14,7 @@
75676 struct exynos4_ohci_platdata {
75677 int (*phy_init)(struct platform_device *pdev, int type);
75678 int (*phy_exit)(struct platform_device *pdev, int type);
75679-};
75680+} __no_const;
75681
75682 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
75683
75684diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
75685index 7c1d252..c5c773e 100644
75686--- a/include/linux/pm_domain.h
75687+++ b/include/linux/pm_domain.h
75688@@ -48,7 +48,7 @@ struct gpd_dev_ops {
75689
75690 struct gpd_cpu_data {
75691 unsigned int saved_exit_latency;
75692- struct cpuidle_state *idle_state;
75693+ cpuidle_state_no_const *idle_state;
75694 };
75695
75696 struct generic_pm_domain {
75697diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
75698index 7d7e09e..8671ef8 100644
75699--- a/include/linux/pm_runtime.h
75700+++ b/include/linux/pm_runtime.h
75701@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
75702
75703 static inline void pm_runtime_mark_last_busy(struct device *dev)
75704 {
75705- ACCESS_ONCE(dev->power.last_busy) = jiffies;
75706+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
75707 }
75708
75709 #else /* !CONFIG_PM_RUNTIME */
75710diff --git a/include/linux/pnp.h b/include/linux/pnp.h
75711index 195aafc..49a7bc2 100644
75712--- a/include/linux/pnp.h
75713+++ b/include/linux/pnp.h
75714@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
75715 struct pnp_fixup {
75716 char id[7];
75717 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
75718-};
75719+} __do_const;
75720
75721 /* config parameters */
75722 #define PNP_CONFIG_NORMAL 0x0001
75723diff --git a/include/linux/poison.h b/include/linux/poison.h
75724index 2110a81..13a11bb 100644
75725--- a/include/linux/poison.h
75726+++ b/include/linux/poison.h
75727@@ -19,8 +19,8 @@
75728 * under normal circumstances, used to verify that nobody uses
75729 * non-initialized list entries.
75730 */
75731-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
75732-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
75733+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
75734+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
75735
75736 /********** include/linux/timer.h **********/
75737 /*
75738diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
75739index c0f44c2..1572583 100644
75740--- a/include/linux/power/smartreflex.h
75741+++ b/include/linux/power/smartreflex.h
75742@@ -238,7 +238,7 @@ struct omap_sr_class_data {
75743 int (*notify)(struct omap_sr *sr, u32 status);
75744 u8 notify_flags;
75745 u8 class_type;
75746-};
75747+} __do_const;
75748
75749 /**
75750 * struct omap_sr_nvalue_table - Smartreflex n-target value info
75751diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
75752index 4ea1d37..80f4b33 100644
75753--- a/include/linux/ppp-comp.h
75754+++ b/include/linux/ppp-comp.h
75755@@ -84,7 +84,7 @@ struct compressor {
75756 struct module *owner;
75757 /* Extra skb space needed by the compressor algorithm */
75758 unsigned int comp_extra;
75759-};
75760+} __do_const;
75761
75762 /*
75763 * The return value from decompress routine is the length of the
75764diff --git a/include/linux/preempt.h b/include/linux/preempt.h
75765index f5d4723..a6ea2fa 100644
75766--- a/include/linux/preempt.h
75767+++ b/include/linux/preempt.h
75768@@ -18,8 +18,13 @@
75769 # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
75770 #endif
75771
75772+#define raw_add_preempt_count(val) do { preempt_count() += (val); } while (0)
75773+#define raw_sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
75774+
75775 #define inc_preempt_count() add_preempt_count(1)
75776+#define raw_inc_preempt_count() raw_add_preempt_count(1)
75777 #define dec_preempt_count() sub_preempt_count(1)
75778+#define raw_dec_preempt_count() raw_sub_preempt_count(1)
75779
75780 #define preempt_count() (current_thread_info()->preempt_count)
75781
75782@@ -64,6 +69,12 @@ do { \
75783 barrier(); \
75784 } while (0)
75785
75786+#define raw_preempt_disable() \
75787+do { \
75788+ raw_inc_preempt_count(); \
75789+ barrier(); \
75790+} while (0)
75791+
75792 #define sched_preempt_enable_no_resched() \
75793 do { \
75794 barrier(); \
75795@@ -72,6 +83,12 @@ do { \
75796
75797 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
75798
75799+#define raw_preempt_enable_no_resched() \
75800+do { \
75801+ barrier(); \
75802+ raw_dec_preempt_count(); \
75803+} while (0)
75804+
75805 #define preempt_enable() \
75806 do { \
75807 preempt_enable_no_resched(); \
75808@@ -116,8 +133,10 @@ do { \
75809 * region.
75810 */
75811 #define preempt_disable() barrier()
75812+#define raw_preempt_disable() barrier()
75813 #define sched_preempt_enable_no_resched() barrier()
75814 #define preempt_enable_no_resched() barrier()
75815+#define raw_preempt_enable_no_resched() barrier()
75816 #define preempt_enable() barrier()
75817
75818 #define preempt_disable_notrace() barrier()
75819diff --git a/include/linux/printk.h b/include/linux/printk.h
75820index 22c7052..ad3fa0a 100644
75821--- a/include/linux/printk.h
75822+++ b/include/linux/printk.h
75823@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
75824 void early_printk(const char *s, ...) { }
75825 #endif
75826
75827+extern int kptr_restrict;
75828+
75829 #ifdef CONFIG_PRINTK
75830 asmlinkage __printf(5, 0)
75831 int vprintk_emit(int facility, int level,
75832@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
75833
75834 extern int printk_delay_msec;
75835 extern int dmesg_restrict;
75836-extern int kptr_restrict;
75837
75838 extern void wake_up_klogd(void);
75839
75840diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
75841index 608e60a..c26f864 100644
75842--- a/include/linux/proc_fs.h
75843+++ b/include/linux/proc_fs.h
75844@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
75845 return proc_create_data(name, mode, parent, proc_fops, NULL);
75846 }
75847
75848+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
75849+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
75850+{
75851+#ifdef CONFIG_GRKERNSEC_PROC_USER
75852+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
75853+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75854+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
75855+#else
75856+ return proc_create_data(name, mode, parent, proc_fops, NULL);
75857+#endif
75858+}
75859+
75860+
75861 extern void proc_set_size(struct proc_dir_entry *, loff_t);
75862 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
75863 extern void *PDE_DATA(const struct inode *);
75864diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
75865index 34a1e10..03a6d03 100644
75866--- a/include/linux/proc_ns.h
75867+++ b/include/linux/proc_ns.h
75868@@ -14,7 +14,7 @@ struct proc_ns_operations {
75869 void (*put)(void *ns);
75870 int (*install)(struct nsproxy *nsproxy, void *ns);
75871 unsigned int (*inum)(void *ns);
75872-};
75873+} __do_const;
75874
75875 struct proc_ns {
75876 void *ns;
75877diff --git a/include/linux/random.h b/include/linux/random.h
75878index 3b9377d..61b506a 100644
75879--- a/include/linux/random.h
75880+++ b/include/linux/random.h
75881@@ -32,6 +32,11 @@ void prandom_seed(u32 seed);
75882 u32 prandom_u32_state(struct rnd_state *);
75883 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
75884
75885+static inline unsigned long pax_get_random_long(void)
75886+{
75887+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
75888+}
75889+
75890 /*
75891 * Handle minimum values for seeds
75892 */
75893diff --git a/include/linux/rculist.h b/include/linux/rculist.h
75894index f4b1001..8ddb2b6 100644
75895--- a/include/linux/rculist.h
75896+++ b/include/linux/rculist.h
75897@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
75898 struct list_head *prev, struct list_head *next);
75899 #endif
75900
75901+extern void __pax_list_add_rcu(struct list_head *new,
75902+ struct list_head *prev, struct list_head *next);
75903+
75904 /**
75905 * list_add_rcu - add a new entry to rcu-protected list
75906 * @new: new entry to be added
75907@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
75908 __list_add_rcu(new, head, head->next);
75909 }
75910
75911+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
75912+{
75913+ __pax_list_add_rcu(new, head, head->next);
75914+}
75915+
75916 /**
75917 * list_add_tail_rcu - add a new entry to rcu-protected list
75918 * @new: new entry to be added
75919@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
75920 __list_add_rcu(new, head->prev, head);
75921 }
75922
75923+static inline void pax_list_add_tail_rcu(struct list_head *new,
75924+ struct list_head *head)
75925+{
75926+ __pax_list_add_rcu(new, head->prev, head);
75927+}
75928+
75929 /**
75930 * list_del_rcu - deletes entry from list without re-initialization
75931 * @entry: the element to delete from the list.
75932@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
75933 entry->prev = LIST_POISON2;
75934 }
75935
75936+extern void pax_list_del_rcu(struct list_head *entry);
75937+
75938 /**
75939 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
75940 * @n: the element to delete from the hash list.
75941diff --git a/include/linux/reboot.h b/include/linux/reboot.h
75942index 23b3630..e1bc12b 100644
75943--- a/include/linux/reboot.h
75944+++ b/include/linux/reboot.h
75945@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
75946 * Architecture-specific implementations of sys_reboot commands.
75947 */
75948
75949-extern void machine_restart(char *cmd);
75950-extern void machine_halt(void);
75951-extern void machine_power_off(void);
75952+extern void machine_restart(char *cmd) __noreturn;
75953+extern void machine_halt(void) __noreturn;
75954+extern void machine_power_off(void) __noreturn;
75955
75956 extern void machine_shutdown(void);
75957 struct pt_regs;
75958@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
75959 */
75960
75961 extern void kernel_restart_prepare(char *cmd);
75962-extern void kernel_restart(char *cmd);
75963-extern void kernel_halt(void);
75964-extern void kernel_power_off(void);
75965+extern void kernel_restart(char *cmd) __noreturn;
75966+extern void kernel_halt(void) __noreturn;
75967+extern void kernel_power_off(void) __noreturn;
75968
75969 extern int C_A_D; /* for sysctl */
75970 void ctrl_alt_del(void);
75971@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
75972 * Emergency restart, callable from an interrupt handler.
75973 */
75974
75975-extern void emergency_restart(void);
75976+extern void emergency_restart(void) __noreturn;
75977 #include <asm/emergency-restart.h>
75978
75979 #endif /* _LINUX_REBOOT_H */
75980diff --git a/include/linux/regset.h b/include/linux/regset.h
75981index 8e0c9fe..ac4d221 100644
75982--- a/include/linux/regset.h
75983+++ b/include/linux/regset.h
75984@@ -161,7 +161,8 @@ struct user_regset {
75985 unsigned int align;
75986 unsigned int bias;
75987 unsigned int core_note_type;
75988-};
75989+} __do_const;
75990+typedef struct user_regset __no_const user_regset_no_const;
75991
75992 /**
75993 * struct user_regset_view - available regsets
75994diff --git a/include/linux/relay.h b/include/linux/relay.h
75995index d7c8359..818daf5 100644
75996--- a/include/linux/relay.h
75997+++ b/include/linux/relay.h
75998@@ -157,7 +157,7 @@ struct rchan_callbacks
75999 * The callback should return 0 if successful, negative if not.
76000 */
76001 int (*remove_buf_file)(struct dentry *dentry);
76002-};
76003+} __no_const;
76004
76005 /*
76006 * CONFIG_RELAY kernel API, kernel/relay.c
76007diff --git a/include/linux/rio.h b/include/linux/rio.h
76008index 18e0993..8ab5b21 100644
76009--- a/include/linux/rio.h
76010+++ b/include/linux/rio.h
76011@@ -345,7 +345,7 @@ struct rio_ops {
76012 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
76013 u64 rstart, u32 size, u32 flags);
76014 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
76015-};
76016+} __no_const;
76017
76018 #define RIO_RESOURCE_MEM 0x00000100
76019 #define RIO_RESOURCE_DOORBELL 0x00000200
76020diff --git a/include/linux/rmap.h b/include/linux/rmap.h
76021index 6dacb93..6174423 100644
76022--- a/include/linux/rmap.h
76023+++ b/include/linux/rmap.h
76024@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
76025 void anon_vma_init(void); /* create anon_vma_cachep */
76026 int anon_vma_prepare(struct vm_area_struct *);
76027 void unlink_anon_vmas(struct vm_area_struct *);
76028-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
76029-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
76030+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
76031+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
76032
76033 static inline void anon_vma_merge(struct vm_area_struct *vma,
76034 struct vm_area_struct *next)
76035diff --git a/include/linux/sched.h b/include/linux/sched.h
e2b79cd1 76036index 178a8d9..918ea01 100644
bb5f0bf8
AF
76037--- a/include/linux/sched.h
76038+++ b/include/linux/sched.h
76039@@ -62,6 +62,7 @@ struct bio_list;
76040 struct fs_struct;
76041 struct perf_event_context;
76042 struct blk_plug;
76043+struct linux_binprm;
76044
76045 /*
76046 * List of flags we want to share for kernel threads,
76047@@ -303,7 +304,7 @@ extern char __sched_text_start[], __sched_text_end[];
76048 extern int in_sched_functions(unsigned long addr);
76049
76050 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
76051-extern signed long schedule_timeout(signed long timeout);
76052+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
76053 extern signed long schedule_timeout_interruptible(signed long timeout);
76054 extern signed long schedule_timeout_killable(signed long timeout);
76055 extern signed long schedule_timeout_uninterruptible(signed long timeout);
e2b79cd1 76056@@ -314,6 +315,18 @@ struct nsproxy;
bb5f0bf8
AF
76057 struct user_namespace;
76058
76059 #ifdef CONFIG_MMU
bb5f0bf8
AF
76060+
76061+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
76062+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
76063+#else
76064+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
76065+{
76066+ return 0;
76067+}
76068+#endif
76069+
76070+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
76071+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
bb5f0bf8
AF
76072 extern void arch_pick_mmap_layout(struct mm_struct *mm);
76073 extern unsigned long
76074 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
e2b79cd1 76075@@ -591,6 +604,17 @@ struct signal_struct {
bb5f0bf8
AF
76076 #ifdef CONFIG_TASKSTATS
76077 struct taskstats *stats;
76078 #endif
76079+
76080+#ifdef CONFIG_GRKERNSEC
76081+ u32 curr_ip;
76082+ u32 saved_ip;
76083+ u32 gr_saddr;
76084+ u32 gr_daddr;
76085+ u16 gr_sport;
76086+ u16 gr_dport;
76087+ u8 used_accept:1;
76088+#endif
76089+
76090 #ifdef CONFIG_AUDIT
76091 unsigned audit_tty;
76092 unsigned audit_tty_log_passwd;
e2b79cd1 76093@@ -671,6 +695,14 @@ struct user_struct {
bb5f0bf8
AF
76094 struct key *session_keyring; /* UID's default session keyring */
76095 #endif
76096
76097+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
76098+ unsigned char kernel_banned;
76099+#endif
76100+#ifdef CONFIG_GRKERNSEC_BRUTE
76101+ unsigned char suid_banned;
76102+ unsigned long suid_ban_expires;
76103+#endif
76104+
76105 /* Hash table maintenance information */
76106 struct hlist_node uidhash_node;
76107 kuid_t uid;
e2b79cd1 76108@@ -1158,8 +1190,8 @@ struct task_struct {
bb5f0bf8
AF
76109 struct list_head thread_group;
76110
76111 struct completion *vfork_done; /* for vfork() */
76112- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
76113- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
76114+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
76115+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
76116
76117 cputime_t utime, stime, utimescaled, stimescaled;
76118 cputime_t gtime;
e2b79cd1 76119@@ -1184,11 +1216,6 @@ struct task_struct {
bb5f0bf8
AF
76120 struct task_cputime cputime_expires;
76121 struct list_head cpu_timers[3];
76122
76123-/* process credentials */
76124- const struct cred __rcu *real_cred; /* objective and real subjective task
76125- * credentials (COW) */
76126- const struct cred __rcu *cred; /* effective (overridable) subjective task
76127- * credentials (COW) */
76128 char comm[TASK_COMM_LEN]; /* executable name excluding path
76129 - access with [gs]et_task_comm (which lock
76130 it with task_lock())
e2b79cd1 76131@@ -1205,6 +1232,10 @@ struct task_struct {
bb5f0bf8
AF
76132 #endif
76133 /* CPU-specific state of this task */
76134 struct thread_struct thread;
76135+/* thread_info moved to task_struct */
76136+#ifdef CONFIG_X86
76137+ struct thread_info tinfo;
76138+#endif
76139 /* filesystem information */
76140 struct fs_struct *fs;
76141 /* open file information */
e2b79cd1 76142@@ -1278,6 +1309,10 @@ struct task_struct {
bb5f0bf8
AF
76143 gfp_t lockdep_reclaim_gfp;
76144 #endif
76145
76146+/* process credentials */
76147+ const struct cred __rcu *real_cred; /* objective and real subjective task
76148+ * credentials (COW) */
76149+
76150 /* journalling filesystem info */
76151 void *journal_info;
76152
e2b79cd1 76153@@ -1316,6 +1351,10 @@ struct task_struct {
bb5f0bf8
AF
76154 /* cg_list protected by css_set_lock and tsk->alloc_lock */
76155 struct list_head cg_list;
76156 #endif
76157+
76158+ const struct cred __rcu *cred; /* effective (overridable) subjective task
76159+ * credentials (COW) */
76160+
76161 #ifdef CONFIG_FUTEX
76162 struct robust_list_head __user *robust_list;
76163 #ifdef CONFIG_COMPAT
e2b79cd1 76164@@ -1416,8 +1455,76 @@ struct task_struct {
bb5f0bf8
AF
76165 unsigned int sequential_io;
76166 unsigned int sequential_io_avg;
76167 #endif
76168+
76169+#ifdef CONFIG_GRKERNSEC
76170+ /* grsecurity */
76171+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76172+ u64 exec_id;
76173+#endif
76174+#ifdef CONFIG_GRKERNSEC_SETXID
76175+ const struct cred *delayed_cred;
76176+#endif
76177+ struct dentry *gr_chroot_dentry;
76178+ struct acl_subject_label *acl;
76179+ struct acl_role_label *role;
76180+ struct file *exec_file;
76181+ unsigned long brute_expires;
76182+ u16 acl_role_id;
76183+ /* is this the task that authenticated to the special role */
76184+ u8 acl_sp_role;
76185+ u8 is_writable;
76186+ u8 brute;
76187+ u8 gr_is_chrooted;
76188+#endif
76189+
76190 };
76191
76192+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
76193+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
76194+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
76195+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
76196+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
76197+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
76198+
76199+#ifdef CONFIG_PAX_SOFTMODE
76200+extern int pax_softmode;
76201+#endif
76202+
76203+extern int pax_check_flags(unsigned long *);
76204+
76205+/* if tsk != current then task_lock must be held on it */
76206+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
76207+static inline unsigned long pax_get_flags(struct task_struct *tsk)
76208+{
76209+ if (likely(tsk->mm))
76210+ return tsk->mm->pax_flags;
76211+ else
76212+ return 0UL;
76213+}
76214+
76215+/* if tsk != current then task_lock must be held on it */
76216+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
76217+{
76218+ if (likely(tsk->mm)) {
76219+ tsk->mm->pax_flags = flags;
76220+ return 0;
76221+ }
76222+ return -EINVAL;
76223+}
76224+#endif
76225+
76226+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76227+extern void pax_set_initial_flags(struct linux_binprm *bprm);
76228+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
76229+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
76230+#endif
76231+
76232+struct path;
76233+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
76234+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
76235+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
76236+extern void pax_report_refcount_overflow(struct pt_regs *regs);
76237+
76238 /* Future-safe accessor for struct task_struct's cpus_allowed. */
76239 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
76240
e2b79cd1 76241@@ -1476,7 +1583,7 @@ struct pid_namespace;
bb5f0bf8
AF
76242 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
76243 struct pid_namespace *ns);
76244
76245-static inline pid_t task_pid_nr(struct task_struct *tsk)
76246+static inline pid_t task_pid_nr(const struct task_struct *tsk)
76247 {
76248 return tsk->pid;
76249 }
e2b79cd1 76250@@ -1919,7 +2026,9 @@ void yield(void);
bb5f0bf8
AF
76251 extern struct exec_domain default_exec_domain;
76252
76253 union thread_union {
76254+#ifndef CONFIG_X86
76255 struct thread_info thread_info;
76256+#endif
76257 unsigned long stack[THREAD_SIZE/sizeof(long)];
76258 };
76259
e2b79cd1 76260@@ -1952,6 +2061,7 @@ extern struct pid_namespace init_pid_ns;
bb5f0bf8
AF
76261 */
76262
76263 extern struct task_struct *find_task_by_vpid(pid_t nr);
76264+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
76265 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
76266 struct pid_namespace *ns);
76267
e2b79cd1 76268@@ -2118,7 +2228,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
bb5f0bf8
AF
76269 extern void exit_itimers(struct signal_struct *);
76270 extern void flush_itimer_signals(void);
76271
76272-extern void do_group_exit(int);
76273+extern __noreturn void do_group_exit(int);
76274
76275 extern int allow_signal(int);
76276 extern int disallow_signal(int);
e2b79cd1 76277@@ -2309,9 +2419,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
bb5f0bf8
AF
76278
76279 #endif
76280
76281-static inline int object_is_on_stack(void *obj)
76282+static inline int object_starts_on_stack(void *obj)
76283 {
76284- void *stack = task_stack_page(current);
76285+ const void *stack = task_stack_page(current);
76286
76287 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
76288 }
76289diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
76290index bf8086b..962b035 100644
76291--- a/include/linux/sched/sysctl.h
76292+++ b/include/linux/sched/sysctl.h
76293@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
76294 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
76295
76296 extern int sysctl_max_map_count;
76297+extern unsigned long sysctl_heap_stack_gap;
76298
76299 extern unsigned int sysctl_sched_latency;
76300 extern unsigned int sysctl_sched_min_granularity;
76301diff --git a/include/linux/security.h b/include/linux/security.h
76302index 4686491..2bd210e 100644
76303--- a/include/linux/security.h
76304+++ b/include/linux/security.h
76305@@ -26,6 +26,7 @@
76306 #include <linux/capability.h>
76307 #include <linux/slab.h>
76308 #include <linux/err.h>
76309+#include <linux/grsecurity.h>
76310
76311 struct linux_binprm;
76312 struct cred;
76313diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
76314index 2da29ac..aac448ec 100644
76315--- a/include/linux/seq_file.h
76316+++ b/include/linux/seq_file.h
76317@@ -26,6 +26,9 @@ struct seq_file {
76318 struct mutex lock;
76319 const struct seq_operations *op;
76320 int poll_event;
76321+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76322+ u64 exec_id;
76323+#endif
76324 #ifdef CONFIG_USER_NS
76325 struct user_namespace *user_ns;
76326 #endif
76327@@ -38,6 +41,7 @@ struct seq_operations {
76328 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
76329 int (*show) (struct seq_file *m, void *v);
76330 };
76331+typedef struct seq_operations __no_const seq_operations_no_const;
76332
76333 #define SEQ_SKIP 1
76334
76335diff --git a/include/linux/shm.h b/include/linux/shm.h
76336index 429c199..4d42e38 100644
76337--- a/include/linux/shm.h
76338+++ b/include/linux/shm.h
76339@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
76340
76341 /* The task created the shm object. NULL if the task is dead. */
76342 struct task_struct *shm_creator;
76343+#ifdef CONFIG_GRKERNSEC
76344+ time_t shm_createtime;
76345+ pid_t shm_lapid;
76346+#endif
76347 };
76348
76349 /* shm_mode upper byte flags */
76350diff --git a/include/linux/signal.h b/include/linux/signal.h
76351index d897484..323ba98 100644
76352--- a/include/linux/signal.h
76353+++ b/include/linux/signal.h
76354@@ -433,6 +433,7 @@ void signals_init(void);
76355
76356 int restore_altstack(const stack_t __user *);
76357 int __save_altstack(stack_t __user *, unsigned long);
76358+void __save_altstack_ex(stack_t __user *, unsigned long);
76359
76360 #ifdef CONFIG_PROC_FS
76361 struct seq_file;
76362diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
76363index dec1748..112c1f9 100644
76364--- a/include/linux/skbuff.h
76365+++ b/include/linux/skbuff.h
76366@@ -640,7 +640,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
76367 extern struct sk_buff *__alloc_skb(unsigned int size,
76368 gfp_t priority, int flags, int node);
76369 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
76370-static inline struct sk_buff *alloc_skb(unsigned int size,
76371+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
76372 gfp_t priority)
76373 {
76374 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
76375@@ -756,7 +756,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
76376 */
76377 static inline int skb_queue_empty(const struct sk_buff_head *list)
76378 {
76379- return list->next == (struct sk_buff *)list;
76380+ return list->next == (const struct sk_buff *)list;
76381 }
76382
76383 /**
76384@@ -769,7 +769,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
76385 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
76386 const struct sk_buff *skb)
76387 {
76388- return skb->next == (struct sk_buff *)list;
76389+ return skb->next == (const struct sk_buff *)list;
76390 }
76391
76392 /**
76393@@ -782,7 +782,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
76394 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
76395 const struct sk_buff *skb)
76396 {
76397- return skb->prev == (struct sk_buff *)list;
76398+ return skb->prev == (const struct sk_buff *)list;
76399 }
76400
76401 /**
76402@@ -1848,7 +1848,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
76403 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
76404 */
76405 #ifndef NET_SKB_PAD
76406-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
76407+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
76408 #endif
76409
76410 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
76411@@ -2443,7 +2443,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
76412 int noblock, int *err);
76413 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
76414 struct poll_table_struct *wait);
76415-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
76416+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
76417 int offset, struct iovec *to,
76418 int size);
76419 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
76420@@ -2733,6 +2733,9 @@ static inline void nf_reset(struct sk_buff *skb)
76421 nf_bridge_put(skb->nf_bridge);
76422 skb->nf_bridge = NULL;
76423 #endif
76424+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
76425+ skb->nf_trace = 0;
76426+#endif
76427 }
76428
76429 static inline void nf_reset_trace(struct sk_buff *skb)
76430diff --git a/include/linux/slab.h b/include/linux/slab.h
76431index 0c62175..f016ac1 100644
76432--- a/include/linux/slab.h
76433+++ b/include/linux/slab.h
76434@@ -12,15 +12,29 @@
76435 #include <linux/gfp.h>
76436 #include <linux/types.h>
76437 #include <linux/workqueue.h>
76438-
76439+#include <linux/err.h>
76440
76441 /*
76442 * Flags to pass to kmem_cache_create().
76443 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
76444 */
76445 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
76446+
76447+#ifdef CONFIG_PAX_USERCOPY_SLABS
76448+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
76449+#else
76450+#define SLAB_USERCOPY 0x00000000UL
76451+#endif
76452+
76453 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
76454 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
76455+
76456+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76457+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
76458+#else
76459+#define SLAB_NO_SANITIZE 0x00000000UL
76460+#endif
76461+
76462 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
76463 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
76464 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
76465@@ -89,10 +103,13 @@
76466 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
76467 * Both make kfree a no-op.
76468 */
76469-#define ZERO_SIZE_PTR ((void *)16)
76470+#define ZERO_SIZE_PTR \
76471+({ \
76472+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
76473+ (void *)(-MAX_ERRNO-1L); \
76474+})
76475
76476-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
76477- (unsigned long)ZERO_SIZE_PTR)
76478+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
76479
76480
76481 struct mem_cgroup;
76482@@ -132,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
76483 void kfree(const void *);
76484 void kzfree(const void *);
76485 size_t ksize(const void *);
76486+const char *check_heap_object(const void *ptr, unsigned long n);
76487+bool is_usercopy_object(const void *ptr);
76488
76489 /*
76490 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
76491@@ -164,7 +183,7 @@ struct kmem_cache {
76492 unsigned int align; /* Alignment as calculated */
76493 unsigned long flags; /* Active flags on the slab */
76494 const char *name; /* Slab name for sysfs */
76495- int refcount; /* Use counter */
76496+ atomic_t refcount; /* Use counter */
76497 void (*ctor)(void *); /* Called on object slot creation */
76498 struct list_head list; /* List of all slab caches on the system */
76499 };
76500@@ -226,6 +245,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
76501 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
76502 #endif
76503
76504+#ifdef CONFIG_PAX_USERCOPY_SLABS
76505+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
76506+#endif
76507+
76508 /*
76509 * Figure out which kmalloc slab an allocation of a certain size
76510 * belongs to.
76511@@ -234,7 +257,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
76512 * 2 = 120 .. 192 bytes
76513 * n = 2^(n-1) .. 2^n -1
76514 */
76515-static __always_inline int kmalloc_index(size_t size)
76516+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
76517 {
76518 if (!size)
76519 return 0;
76520@@ -406,6 +429,7 @@ void print_slabinfo_header(struct seq_file *m);
76521 * for general use, and so are not documented here. For a full list of
76522 * potential flags, always refer to linux/gfp.h.
76523 */
76524+
76525 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
76526 {
76527 if (size != 0 && n > SIZE_MAX / size)
76528@@ -465,7 +489,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
76529 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
76530 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
76531 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
76532-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
76533+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
76534 #define kmalloc_track_caller(size, flags) \
76535 __kmalloc_track_caller(size, flags, _RET_IP_)
76536 #else
76537@@ -485,7 +509,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
76538 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
76539 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
76540 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
76541-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
76542+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
76543 #define kmalloc_node_track_caller(size, flags, node) \
76544 __kmalloc_node_track_caller(size, flags, node, \
76545 _RET_IP_)
76546diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
76547index cd40158..4e2f7af 100644
76548--- a/include/linux/slab_def.h
76549+++ b/include/linux/slab_def.h
76550@@ -50,7 +50,7 @@ struct kmem_cache {
76551 /* 4) cache creation/removal */
76552 const char *name;
76553 struct list_head list;
76554- int refcount;
76555+ atomic_t refcount;
76556 int object_size;
76557 int align;
76558
76559@@ -66,10 +66,14 @@ struct kmem_cache {
76560 unsigned long node_allocs;
76561 unsigned long node_frees;
76562 unsigned long node_overflow;
76563- atomic_t allochit;
76564- atomic_t allocmiss;
76565- atomic_t freehit;
76566- atomic_t freemiss;
76567+ atomic_unchecked_t allochit;
76568+ atomic_unchecked_t allocmiss;
76569+ atomic_unchecked_t freehit;
76570+ atomic_unchecked_t freemiss;
76571+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76572+ atomic_unchecked_t sanitized;
76573+ atomic_unchecked_t not_sanitized;
76574+#endif
76575
76576 /*
76577 * If debugging is enabled, then the allocator can add additional
76578@@ -103,7 +107,7 @@ struct kmem_cache {
76579 };
76580
76581 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
76582-void *__kmalloc(size_t size, gfp_t flags);
76583+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
76584
76585 #ifdef CONFIG_TRACING
76586 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
76587@@ -136,6 +140,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76588 cachep = kmalloc_dma_caches[i];
76589 else
76590 #endif
76591+
76592+#ifdef CONFIG_PAX_USERCOPY_SLABS
76593+ if (flags & GFP_USERCOPY)
76594+ cachep = kmalloc_usercopy_caches[i];
76595+ else
76596+#endif
76597+
76598 cachep = kmalloc_caches[i];
76599
76600 ret = kmem_cache_alloc_trace(cachep, flags, size);
76601@@ -146,7 +157,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76602 }
76603
76604 #ifdef CONFIG_NUMA
76605-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
76606+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76607 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
76608
76609 #ifdef CONFIG_TRACING
76610@@ -185,6 +196,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
76611 cachep = kmalloc_dma_caches[i];
76612 else
76613 #endif
76614+
76615+#ifdef CONFIG_PAX_USERCOPY_SLABS
76616+ if (flags & GFP_USERCOPY)
76617+ cachep = kmalloc_usercopy_caches[i];
76618+ else
76619+#endif
76620+
76621 cachep = kmalloc_caches[i];
76622
76623 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
76624diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
76625index f28e14a..7831211 100644
76626--- a/include/linux/slob_def.h
76627+++ b/include/linux/slob_def.h
76628@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
76629 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
76630 }
76631
76632-void *__kmalloc_node(size_t size, gfp_t flags, int node);
76633+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76634
76635 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
76636 {
76637@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76638 return __kmalloc_node(size, flags, NUMA_NO_NODE);
76639 }
76640
76641-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
76642+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
76643 {
76644 return kmalloc(size, flags);
76645 }
76646diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
76647index 027276f..092bfe8 100644
76648--- a/include/linux/slub_def.h
76649+++ b/include/linux/slub_def.h
76650@@ -80,7 +80,7 @@ struct kmem_cache {
76651 struct kmem_cache_order_objects max;
76652 struct kmem_cache_order_objects min;
76653 gfp_t allocflags; /* gfp flags to use on each alloc */
76654- int refcount; /* Refcount for slab cache destroy */
76655+ atomic_t refcount; /* Refcount for slab cache destroy */
76656 void (*ctor)(void *);
76657 int inuse; /* Offset to metadata */
76658 int align; /* Alignment */
76659@@ -105,7 +105,7 @@ struct kmem_cache {
76660 };
76661
76662 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
76663-void *__kmalloc(size_t size, gfp_t flags);
76664+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
76665
76666 static __always_inline void *
76667 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
76668@@ -149,7 +149,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
76669 }
76670 #endif
76671
76672-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
76673+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
76674 {
76675 unsigned int order = get_order(size);
76676 return kmalloc_order_trace(size, flags, order);
76677@@ -175,7 +175,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76678 }
76679
76680 #ifdef CONFIG_NUMA
76681-void *__kmalloc_node(size_t size, gfp_t flags, int node);
76682+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76683 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
76684
76685 #ifdef CONFIG_TRACING
76686diff --git a/include/linux/smp.h b/include/linux/smp.h
76687index c848876..11e8a84 100644
76688--- a/include/linux/smp.h
76689+++ b/include/linux/smp.h
76690@@ -221,7 +221,9 @@ static inline void kick_all_cpus_sync(void) { }
76691 #endif
76692
76693 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
76694+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
76695 #define put_cpu() preempt_enable()
76696+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
76697
76698 /*
76699 * Callback to arch code if there's nosmp or maxcpus=0 on the
76700diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
76701index 54f91d3..be2c379 100644
76702--- a/include/linux/sock_diag.h
76703+++ b/include/linux/sock_diag.h
76704@@ -11,7 +11,7 @@ struct sock;
76705 struct sock_diag_handler {
76706 __u8 family;
76707 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
76708-};
76709+} __do_const;
76710
76711 int sock_diag_register(const struct sock_diag_handler *h);
76712 void sock_diag_unregister(const struct sock_diag_handler *h);
76713diff --git a/include/linux/sonet.h b/include/linux/sonet.h
76714index 680f9a3..f13aeb0 100644
76715--- a/include/linux/sonet.h
76716+++ b/include/linux/sonet.h
76717@@ -7,7 +7,7 @@
76718 #include <uapi/linux/sonet.h>
76719
76720 struct k_sonet_stats {
76721-#define __HANDLE_ITEM(i) atomic_t i
76722+#define __HANDLE_ITEM(i) atomic_unchecked_t i
76723 __SONET_ITEMS
76724 #undef __HANDLE_ITEM
76725 };
76726diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
76727index 07d8e53..dc934c9 100644
76728--- a/include/linux/sunrpc/addr.h
76729+++ b/include/linux/sunrpc/addr.h
76730@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
76731 {
76732 switch (sap->sa_family) {
76733 case AF_INET:
76734- return ntohs(((struct sockaddr_in *)sap)->sin_port);
76735+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
76736 case AF_INET6:
76737- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
76738+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
76739 }
76740 return 0;
76741 }
76742@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
76743 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
76744 const struct sockaddr *src)
76745 {
76746- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
76747+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
76748 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
76749
76750 dsin->sin_family = ssin->sin_family;
76751@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
76752 if (sa->sa_family != AF_INET6)
76753 return 0;
76754
76755- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
76756+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
76757 }
76758
76759 #endif /* _LINUX_SUNRPC_ADDR_H */
76760diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
76761index bfe11be..12bc8c4 100644
76762--- a/include/linux/sunrpc/clnt.h
76763+++ b/include/linux/sunrpc/clnt.h
76764@@ -96,7 +96,7 @@ struct rpc_procinfo {
76765 unsigned int p_timer; /* Which RTT timer to use */
76766 u32 p_statidx; /* Which procedure to account */
76767 const char * p_name; /* name of procedure */
76768-};
76769+} __do_const;
76770
76771 #ifdef __KERNEL__
76772
76773diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
76774index 1f0216b..6a4fa50 100644
76775--- a/include/linux/sunrpc/svc.h
76776+++ b/include/linux/sunrpc/svc.h
76777@@ -411,7 +411,7 @@ struct svc_procedure {
76778 unsigned int pc_count; /* call count */
76779 unsigned int pc_cachetype; /* cache info (NFS) */
76780 unsigned int pc_xdrressize; /* maximum size of XDR reply */
76781-};
76782+} __do_const;
76783
76784 /*
76785 * Function prototypes.
76786diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
76787index 0b8e3e6..33e0a01 100644
76788--- a/include/linux/sunrpc/svc_rdma.h
76789+++ b/include/linux/sunrpc/svc_rdma.h
76790@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
76791 extern unsigned int svcrdma_max_requests;
76792 extern unsigned int svcrdma_max_req_size;
76793
76794-extern atomic_t rdma_stat_recv;
76795-extern atomic_t rdma_stat_read;
76796-extern atomic_t rdma_stat_write;
76797-extern atomic_t rdma_stat_sq_starve;
76798-extern atomic_t rdma_stat_rq_starve;
76799-extern atomic_t rdma_stat_rq_poll;
76800-extern atomic_t rdma_stat_rq_prod;
76801-extern atomic_t rdma_stat_sq_poll;
76802-extern atomic_t rdma_stat_sq_prod;
76803+extern atomic_unchecked_t rdma_stat_recv;
76804+extern atomic_unchecked_t rdma_stat_read;
76805+extern atomic_unchecked_t rdma_stat_write;
76806+extern atomic_unchecked_t rdma_stat_sq_starve;
76807+extern atomic_unchecked_t rdma_stat_rq_starve;
76808+extern atomic_unchecked_t rdma_stat_rq_poll;
76809+extern atomic_unchecked_t rdma_stat_rq_prod;
76810+extern atomic_unchecked_t rdma_stat_sq_poll;
76811+extern atomic_unchecked_t rdma_stat_sq_prod;
76812
76813 #define RPCRDMA_VERSION 1
76814
76815diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
76816index ff374ab..7fd2ecb 100644
76817--- a/include/linux/sunrpc/svcauth.h
76818+++ b/include/linux/sunrpc/svcauth.h
76819@@ -109,7 +109,7 @@ struct auth_ops {
76820 int (*release)(struct svc_rqst *rq);
76821 void (*domain_release)(struct auth_domain *);
76822 int (*set_client)(struct svc_rqst *rq);
76823-};
76824+} __do_const;
76825
76826 #define SVC_GARBAGE 1
76827 #define SVC_SYSERR 2
76828diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
76829index a5ffd32..0935dea 100644
76830--- a/include/linux/swiotlb.h
76831+++ b/include/linux/swiotlb.h
76832@@ -60,7 +60,8 @@ extern void
76833
76834 extern void
76835 swiotlb_free_coherent(struct device *hwdev, size_t size,
76836- void *vaddr, dma_addr_t dma_handle);
76837+ void *vaddr, dma_addr_t dma_handle,
76838+ struct dma_attrs *attrs);
76839
76840 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
76841 unsigned long offset, size_t size,
76842diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
76843index 84662ec..d8f8adb 100644
76844--- a/include/linux/syscalls.h
76845+++ b/include/linux/syscalls.h
76846@@ -97,8 +97,12 @@ struct sigaltstack;
76847 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
76848
76849 #define __SC_DECL(t, a) t a
76850-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
76851-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
76852+#define __TYPE_IS_SL(t) (__same_type((t)0, 0L))
76853+#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
76854+#define __TYPE_IS_SLL(t) (__same_type((t)0, 0LL))
76855+#define __TYPE_IS_ULL(t) (__same_type((t)0, 0ULL))
76856+#define __TYPE_IS_LL(t) (__TYPE_IS_SLL(t) || __TYPE_IS_ULL(t))
76857+#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), __builtin_choose_expr(__TYPE_IS_ULL(t), 0ULL, 0LL), __builtin_choose_expr(__TYPE_IS_UL(t), 0UL, 0L))) a
76858 #define __SC_CAST(t, a) (t) a
76859 #define __SC_ARGS(t, a) a
76860 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
76861@@ -362,11 +366,11 @@ asmlinkage long sys_sync(void);
76862 asmlinkage long sys_fsync(unsigned int fd);
76863 asmlinkage long sys_fdatasync(unsigned int fd);
76864 asmlinkage long sys_bdflush(int func, long data);
76865-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
76866- char __user *type, unsigned long flags,
76867+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
76868+ const char __user *type, unsigned long flags,
76869 void __user *data);
76870-asmlinkage long sys_umount(char __user *name, int flags);
76871-asmlinkage long sys_oldumount(char __user *name);
76872+asmlinkage long sys_umount(const char __user *name, int flags);
76873+asmlinkage long sys_oldumount(const char __user *name);
76874 asmlinkage long sys_truncate(const char __user *path, long length);
76875 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
76876 asmlinkage long sys_stat(const char __user *filename,
76877@@ -578,7 +582,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
76878 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
76879 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
76880 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
76881- struct sockaddr __user *, int);
76882+ struct sockaddr __user *, int) __intentional_overflow(0);
76883 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
76884 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
76885 unsigned int vlen, unsigned flags);
76886diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
76887index 27b3b0b..e093dd9 100644
76888--- a/include/linux/syscore_ops.h
76889+++ b/include/linux/syscore_ops.h
76890@@ -16,7 +16,7 @@ struct syscore_ops {
76891 int (*suspend)(void);
76892 void (*resume)(void);
76893 void (*shutdown)(void);
76894-};
76895+} __do_const;
76896
76897 extern void register_syscore_ops(struct syscore_ops *ops);
76898 extern void unregister_syscore_ops(struct syscore_ops *ops);
76899diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
76900index 14a8ff2..af52bad 100644
76901--- a/include/linux/sysctl.h
76902+++ b/include/linux/sysctl.h
76903@@ -34,13 +34,13 @@ struct ctl_table_root;
76904 struct ctl_table_header;
76905 struct ctl_dir;
76906
76907-typedef struct ctl_table ctl_table;
76908-
76909 typedef int proc_handler (struct ctl_table *ctl, int write,
76910 void __user *buffer, size_t *lenp, loff_t *ppos);
76911
76912 extern int proc_dostring(struct ctl_table *, int,
76913 void __user *, size_t *, loff_t *);
76914+extern int proc_dostring_modpriv(struct ctl_table *, int,
76915+ void __user *, size_t *, loff_t *);
76916 extern int proc_dointvec(struct ctl_table *, int,
76917 void __user *, size_t *, loff_t *);
76918 extern int proc_dointvec_minmax(struct ctl_table *, int,
76919@@ -115,7 +115,9 @@ struct ctl_table
76920 struct ctl_table_poll *poll;
76921 void *extra1;
76922 void *extra2;
76923-};
76924+} __do_const;
76925+typedef struct ctl_table __no_const ctl_table_no_const;
76926+typedef struct ctl_table ctl_table;
76927
76928 struct ctl_node {
76929 struct rb_node node;
76930diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
76931index e2cee22..3ddb921 100644
76932--- a/include/linux/sysfs.h
76933+++ b/include/linux/sysfs.h
76934@@ -31,7 +31,8 @@ struct attribute {
76935 struct lock_class_key *key;
76936 struct lock_class_key skey;
76937 #endif
76938-};
76939+} __do_const;
76940+typedef struct attribute __no_const attribute_no_const;
76941
76942 /**
76943 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
76944@@ -59,8 +60,8 @@ struct attribute_group {
76945 umode_t (*is_visible)(struct kobject *,
76946 struct attribute *, int);
76947 struct attribute **attrs;
76948-};
76949-
76950+} __do_const;
76951+typedef struct attribute_group __no_const attribute_group_no_const;
76952
76953
76954 /**
76955@@ -107,7 +108,8 @@ struct bin_attribute {
76956 char *, loff_t, size_t);
76957 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
76958 struct vm_area_struct *vma);
76959-};
76960+} __do_const;
76961+typedef struct bin_attribute __no_const bin_attribute_no_const;
76962
76963 /**
76964 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
76965diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
76966index 7faf933..9b85a0c 100644
76967--- a/include/linux/sysrq.h
76968+++ b/include/linux/sysrq.h
76969@@ -16,6 +16,7 @@
76970
76971 #include <linux/errno.h>
76972 #include <linux/types.h>
76973+#include <linux/compiler.h>
76974
76975 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
76976 #define SYSRQ_DEFAULT_ENABLE 1
76977@@ -36,7 +37,7 @@ struct sysrq_key_op {
76978 char *help_msg;
76979 char *action_msg;
76980 int enable_mask;
76981-};
76982+} __do_const;
76983
76984 #ifdef CONFIG_MAGIC_SYSRQ
76985
76986diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
76987index e7e0473..7989295 100644
76988--- a/include/linux/thread_info.h
76989+++ b/include/linux/thread_info.h
76990@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
76991 #error "no set_restore_sigmask() provided and default one won't work"
76992 #endif
76993
76994+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
76995+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
76996+{
76997+#ifndef CONFIG_PAX_USERCOPY_DEBUG
76998+ if (!__builtin_constant_p(n))
76999+#endif
77000+ __check_object_size(ptr, n, to_user);
77001+}
77002+
77003 #endif /* __KERNEL__ */
77004
77005 #endif /* _LINUX_THREAD_INFO_H */
77006diff --git a/include/linux/tty.h b/include/linux/tty.h
77007index 8780bd2..d1ae08b 100644
77008--- a/include/linux/tty.h
77009+++ b/include/linux/tty.h
77010@@ -194,7 +194,7 @@ struct tty_port {
77011 const struct tty_port_operations *ops; /* Port operations */
77012 spinlock_t lock; /* Lock protecting tty field */
77013 int blocked_open; /* Waiting to open */
77014- int count; /* Usage count */
77015+ atomic_t count; /* Usage count */
77016 wait_queue_head_t open_wait; /* Open waiters */
77017 wait_queue_head_t close_wait; /* Close waiters */
77018 wait_queue_head_t delta_msr_wait; /* Modem status change */
77019@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
77020 struct tty_struct *tty, struct file *filp);
77021 static inline int tty_port_users(struct tty_port *port)
77022 {
77023- return port->count + port->blocked_open;
77024+ return atomic_read(&port->count) + port->blocked_open;
77025 }
77026
77027 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
77028diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
77029index 756a609..b302dd6 100644
77030--- a/include/linux/tty_driver.h
77031+++ b/include/linux/tty_driver.h
77032@@ -285,7 +285,7 @@ struct tty_operations {
77033 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
77034 #endif
77035 const struct file_operations *proc_fops;
77036-};
77037+} __do_const;
77038
77039 struct tty_driver {
77040 int magic; /* magic number for this structure */
77041diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
77042index 58390c7..95e214c 100644
77043--- a/include/linux/tty_ldisc.h
77044+++ b/include/linux/tty_ldisc.h
77045@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
77046
77047 struct module *owner;
77048
77049- int refcount;
77050+ atomic_t refcount;
77051 };
77052
77053 struct tty_ldisc {
77054diff --git a/include/linux/types.h b/include/linux/types.h
77055index 4d118ba..c3ee9bf 100644
77056--- a/include/linux/types.h
77057+++ b/include/linux/types.h
77058@@ -176,10 +176,26 @@ typedef struct {
77059 int counter;
77060 } atomic_t;
77061
77062+#ifdef CONFIG_PAX_REFCOUNT
77063+typedef struct {
77064+ int counter;
77065+} atomic_unchecked_t;
77066+#else
77067+typedef atomic_t atomic_unchecked_t;
77068+#endif
77069+
77070 #ifdef CONFIG_64BIT
77071 typedef struct {
77072 long counter;
77073 } atomic64_t;
77074+
77075+#ifdef CONFIG_PAX_REFCOUNT
77076+typedef struct {
77077+ long counter;
77078+} atomic64_unchecked_t;
77079+#else
77080+typedef atomic64_t atomic64_unchecked_t;
77081+#endif
77082 #endif
77083
77084 struct list_head {
77085diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
77086index 5ca0951..ab496a5 100644
77087--- a/include/linux/uaccess.h
77088+++ b/include/linux/uaccess.h
77089@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
77090 long ret; \
77091 mm_segment_t old_fs = get_fs(); \
77092 \
77093- set_fs(KERNEL_DS); \
77094 pagefault_disable(); \
77095- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
77096- pagefault_enable(); \
77097+ set_fs(KERNEL_DS); \
77098+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
77099 set_fs(old_fs); \
77100+ pagefault_enable(); \
77101 ret; \
77102 })
77103
77104diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
77105index 8e522cbc..aa8572d 100644
77106--- a/include/linux/uidgid.h
77107+++ b/include/linux/uidgid.h
77108@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
77109
77110 #endif /* CONFIG_USER_NS */
77111
77112+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
77113+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
77114+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
77115+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
77116+
77117 #endif /* _LINUX_UIDGID_H */
77118diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
77119index 99c1b4d..562e6f3 100644
77120--- a/include/linux/unaligned/access_ok.h
77121+++ b/include/linux/unaligned/access_ok.h
77122@@ -4,34 +4,34 @@
77123 #include <linux/kernel.h>
77124 #include <asm/byteorder.h>
77125
77126-static inline u16 get_unaligned_le16(const void *p)
77127+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
77128 {
77129- return le16_to_cpup((__le16 *)p);
77130+ return le16_to_cpup((const __le16 *)p);
77131 }
77132
77133-static inline u32 get_unaligned_le32(const void *p)
77134+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
77135 {
77136- return le32_to_cpup((__le32 *)p);
77137+ return le32_to_cpup((const __le32 *)p);
77138 }
77139
77140-static inline u64 get_unaligned_le64(const void *p)
77141+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
77142 {
77143- return le64_to_cpup((__le64 *)p);
77144+ return le64_to_cpup((const __le64 *)p);
77145 }
77146
77147-static inline u16 get_unaligned_be16(const void *p)
77148+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
77149 {
77150- return be16_to_cpup((__be16 *)p);
77151+ return be16_to_cpup((const __be16 *)p);
77152 }
77153
77154-static inline u32 get_unaligned_be32(const void *p)
77155+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
77156 {
77157- return be32_to_cpup((__be32 *)p);
77158+ return be32_to_cpup((const __be32 *)p);
77159 }
77160
77161-static inline u64 get_unaligned_be64(const void *p)
77162+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
77163 {
77164- return be64_to_cpup((__be64 *)p);
77165+ return be64_to_cpup((const __be64 *)p);
77166 }
77167
77168 static inline void put_unaligned_le16(u16 val, void *p)
77169diff --git a/include/linux/usb.h b/include/linux/usb.h
77170index a0bee5a..5533a52 100644
77171--- a/include/linux/usb.h
77172+++ b/include/linux/usb.h
77173@@ -552,7 +552,7 @@ struct usb_device {
77174 int maxchild;
77175
77176 u32 quirks;
77177- atomic_t urbnum;
77178+ atomic_unchecked_t urbnum;
77179
77180 unsigned long active_duration;
77181
77182@@ -1607,7 +1607,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
77183
77184 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
77185 __u8 request, __u8 requesttype, __u16 value, __u16 index,
77186- void *data, __u16 size, int timeout);
77187+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
77188 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
77189 void *data, int len, int *actual_length, int timeout);
77190 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
77191diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
77192index e452ba6..78f8e80 100644
77193--- a/include/linux/usb/renesas_usbhs.h
77194+++ b/include/linux/usb/renesas_usbhs.h
77195@@ -39,7 +39,7 @@ enum {
77196 */
77197 struct renesas_usbhs_driver_callback {
77198 int (*notify_hotplug)(struct platform_device *pdev);
77199-};
77200+} __no_const;
77201
77202 /*
77203 * callback functions for platform
77204diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
77205index 6f8fbcf..8259001 100644
77206--- a/include/linux/vermagic.h
77207+++ b/include/linux/vermagic.h
77208@@ -25,9 +25,35 @@
77209 #define MODULE_ARCH_VERMAGIC ""
77210 #endif
77211
77212+#ifdef CONFIG_PAX_REFCOUNT
77213+#define MODULE_PAX_REFCOUNT "REFCOUNT "
77214+#else
77215+#define MODULE_PAX_REFCOUNT ""
77216+#endif
77217+
77218+#ifdef CONSTIFY_PLUGIN
77219+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
77220+#else
77221+#define MODULE_CONSTIFY_PLUGIN ""
77222+#endif
77223+
77224+#ifdef STACKLEAK_PLUGIN
77225+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
77226+#else
77227+#define MODULE_STACKLEAK_PLUGIN ""
77228+#endif
77229+
77230+#ifdef CONFIG_GRKERNSEC
77231+#define MODULE_GRSEC "GRSEC "
77232+#else
77233+#define MODULE_GRSEC ""
77234+#endif
77235+
77236 #define VERMAGIC_STRING \
77237 UTS_RELEASE " " \
77238 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
77239 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
77240- MODULE_ARCH_VERMAGIC
77241+ MODULE_ARCH_VERMAGIC \
77242+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
77243+ MODULE_GRSEC
77244
77245diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
77246index 7d5773a..541c01c 100644
77247--- a/include/linux/vmalloc.h
77248+++ b/include/linux/vmalloc.h
77249@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
77250 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
77251 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
77252 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
77253+
77254+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77255+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
77256+#endif
77257+
77258 /* bits [20..32] reserved for arch specific ioremap internals */
77259
77260 /*
77261@@ -75,7 +80,7 @@ extern void *vmalloc_32_user(unsigned long size);
77262 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
77263 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
77264 unsigned long start, unsigned long end, gfp_t gfp_mask,
77265- pgprot_t prot, int node, const void *caller);
77266+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
77267 extern void vfree(const void *addr);
77268
77269 extern void *vmap(struct page **pages, unsigned int count,
77270@@ -137,8 +142,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
77271 extern void free_vm_area(struct vm_struct *area);
77272
77273 /* for /dev/kmem */
77274-extern long vread(char *buf, char *addr, unsigned long count);
77275-extern long vwrite(char *buf, char *addr, unsigned long count);
77276+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
77277+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
77278
77279 /*
77280 * Internals. Dont't use..
77281diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
77282index c586679..f06b389 100644
77283--- a/include/linux/vmstat.h
77284+++ b/include/linux/vmstat.h
77285@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
77286 /*
77287 * Zone based page accounting with per cpu differentials.
77288 */
77289-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77290+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77291
77292 static inline void zone_page_state_add(long x, struct zone *zone,
77293 enum zone_stat_item item)
77294 {
77295- atomic_long_add(x, &zone->vm_stat[item]);
77296- atomic_long_add(x, &vm_stat[item]);
77297+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
77298+ atomic_long_add_unchecked(x, &vm_stat[item]);
77299 }
77300
77301 static inline unsigned long global_page_state(enum zone_stat_item item)
77302 {
77303- long x = atomic_long_read(&vm_stat[item]);
77304+ long x = atomic_long_read_unchecked(&vm_stat[item]);
77305 #ifdef CONFIG_SMP
77306 if (x < 0)
77307 x = 0;
77308@@ -112,7 +112,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
77309 static inline unsigned long zone_page_state(struct zone *zone,
77310 enum zone_stat_item item)
77311 {
77312- long x = atomic_long_read(&zone->vm_stat[item]);
77313+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
77314 #ifdef CONFIG_SMP
77315 if (x < 0)
77316 x = 0;
77317@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
77318 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
77319 enum zone_stat_item item)
77320 {
77321- long x = atomic_long_read(&zone->vm_stat[item]);
77322+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
77323
77324 #ifdef CONFIG_SMP
77325 int cpu;
77326@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
77327
77328 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
77329 {
77330- atomic_long_inc(&zone->vm_stat[item]);
77331- atomic_long_inc(&vm_stat[item]);
77332+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
77333+ atomic_long_inc_unchecked(&vm_stat[item]);
77334 }
77335
77336 static inline void __inc_zone_page_state(struct page *page,
77337@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
77338
77339 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
77340 {
77341- atomic_long_dec(&zone->vm_stat[item]);
77342- atomic_long_dec(&vm_stat[item]);
77343+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
77344+ atomic_long_dec_unchecked(&vm_stat[item]);
77345 }
77346
77347 static inline void __dec_zone_page_state(struct page *page,
77348diff --git a/include/linux/xattr.h b/include/linux/xattr.h
77349index fdbafc6..49dfe4f 100644
77350--- a/include/linux/xattr.h
77351+++ b/include/linux/xattr.h
77352@@ -28,7 +28,7 @@ struct xattr_handler {
77353 size_t size, int handler_flags);
77354 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
77355 size_t size, int flags, int handler_flags);
77356-};
77357+} __do_const;
77358
77359 struct xattr {
77360 char *name;
77361@@ -37,6 +37,9 @@ struct xattr {
77362 };
77363
77364 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
77365+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
77366+ssize_t pax_getxattr(struct dentry *, void *, size_t);
77367+#endif
77368 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
77369 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
77370 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
77371diff --git a/include/linux/zlib.h b/include/linux/zlib.h
77372index 9c5a6b4..09c9438 100644
77373--- a/include/linux/zlib.h
77374+++ b/include/linux/zlib.h
77375@@ -31,6 +31,7 @@
77376 #define _ZLIB_H
77377
77378 #include <linux/zconf.h>
77379+#include <linux/compiler.h>
77380
77381 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
77382 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
77383@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
77384
77385 /* basic functions */
77386
77387-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
77388+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
77389 /*
77390 Returns the number of bytes that needs to be allocated for a per-
77391 stream workspace with the specified parameters. A pointer to this
77392diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
77393index 95d1c91..6798cca 100644
77394--- a/include/media/v4l2-dev.h
77395+++ b/include/media/v4l2-dev.h
77396@@ -76,7 +76,7 @@ struct v4l2_file_operations {
77397 int (*mmap) (struct file *, struct vm_area_struct *);
77398 int (*open) (struct file *);
77399 int (*release) (struct file *);
77400-};
77401+} __do_const;
77402
77403 /*
77404 * Newer version of video_device, handled by videodev2.c
77405diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
77406index adcbb20..62c2559 100644
77407--- a/include/net/9p/transport.h
77408+++ b/include/net/9p/transport.h
77409@@ -57,7 +57,7 @@ struct p9_trans_module {
77410 int (*cancel) (struct p9_client *, struct p9_req_t *req);
77411 int (*zc_request)(struct p9_client *, struct p9_req_t *,
77412 char *, char *, int , int, int, int);
77413-};
77414+} __do_const;
77415
77416 void v9fs_register_trans(struct p9_trans_module *m);
77417 void v9fs_unregister_trans(struct p9_trans_module *m);
77418diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
77419index fb94cf1..7c0c987 100644
77420--- a/include/net/bluetooth/l2cap.h
77421+++ b/include/net/bluetooth/l2cap.h
77422@@ -551,7 +551,7 @@ struct l2cap_ops {
77423 void (*defer) (struct l2cap_chan *chan);
77424 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
77425 unsigned long len, int nb);
77426-};
77427+} __do_const;
77428
77429 struct l2cap_conn {
77430 struct hci_conn *hcon;
77431diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
77432index f2ae33d..c457cf0 100644
77433--- a/include/net/caif/cfctrl.h
77434+++ b/include/net/caif/cfctrl.h
77435@@ -52,7 +52,7 @@ struct cfctrl_rsp {
77436 void (*radioset_rsp)(void);
77437 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
77438 struct cflayer *client_layer);
77439-};
77440+} __no_const;
77441
77442 /* Link Setup Parameters for CAIF-Links. */
77443 struct cfctrl_link_param {
77444@@ -101,8 +101,8 @@ struct cfctrl_request_info {
77445 struct cfctrl {
77446 struct cfsrvl serv;
77447 struct cfctrl_rsp res;
77448- atomic_t req_seq_no;
77449- atomic_t rsp_seq_no;
77450+ atomic_unchecked_t req_seq_no;
77451+ atomic_unchecked_t rsp_seq_no;
77452 struct list_head list;
77453 /* Protects from simultaneous access to first_req list */
77454 spinlock_t info_list_lock;
77455diff --git a/include/net/flow.h b/include/net/flow.h
77456index 628e11b..4c475df 100644
77457--- a/include/net/flow.h
77458+++ b/include/net/flow.h
77459@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
77460
77461 extern void flow_cache_flush(void);
77462 extern void flow_cache_flush_deferred(void);
77463-extern atomic_t flow_cache_genid;
77464+extern atomic_unchecked_t flow_cache_genid;
77465
77466 #endif
77467diff --git a/include/net/genetlink.h b/include/net/genetlink.h
77468index 93024a4..eeb6b6e 100644
77469--- a/include/net/genetlink.h
77470+++ b/include/net/genetlink.h
77471@@ -119,7 +119,7 @@ struct genl_ops {
77472 struct netlink_callback *cb);
77473 int (*done)(struct netlink_callback *cb);
77474 struct list_head ops_list;
77475-};
77476+} __do_const;
77477
77478 extern int genl_register_family(struct genl_family *family);
77479 extern int genl_register_family_with_ops(struct genl_family *family,
77480diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
77481index 734d9b5..48a9a4b 100644
77482--- a/include/net/gro_cells.h
77483+++ b/include/net/gro_cells.h
77484@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
77485 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
77486
77487 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
77488- atomic_long_inc(&dev->rx_dropped);
77489+ atomic_long_inc_unchecked(&dev->rx_dropped);
77490 kfree_skb(skb);
77491 return;
77492 }
77493diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
77494index de2c785..0588a6b 100644
77495--- a/include/net/inet_connection_sock.h
77496+++ b/include/net/inet_connection_sock.h
77497@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
77498 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
77499 int (*bind_conflict)(const struct sock *sk,
77500 const struct inet_bind_bucket *tb, bool relax);
77501-};
77502+} __do_const;
77503
77504 /** inet_connection_sock - INET connection oriented sock
77505 *
77506diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
77507index 53f464d..ba76aaa 100644
77508--- a/include/net/inetpeer.h
77509+++ b/include/net/inetpeer.h
77510@@ -47,8 +47,8 @@ struct inet_peer {
77511 */
77512 union {
77513 struct {
77514- atomic_t rid; /* Frag reception counter */
77515- atomic_t ip_id_count; /* IP ID for the next packet */
77516+ atomic_unchecked_t rid; /* Frag reception counter */
77517+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
77518 };
77519 struct rcu_head rcu;
77520 struct inet_peer *gc_next;
77521@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
77522 more++;
77523 inet_peer_refcheck(p);
77524 do {
77525- old = atomic_read(&p->ip_id_count);
77526+ old = atomic_read_unchecked(&p->ip_id_count);
77527 new = old + more;
77528 if (!new)
77529 new = 1;
77530- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
77531+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
77532 return new;
77533 }
77534
77535diff --git a/include/net/ip.h b/include/net/ip.h
77536index a68f838..74518ab 100644
77537--- a/include/net/ip.h
77538+++ b/include/net/ip.h
77539@@ -202,7 +202,7 @@ extern struct local_ports {
77540 } sysctl_local_ports;
77541 extern void inet_get_local_port_range(int *low, int *high);
77542
77543-extern unsigned long *sysctl_local_reserved_ports;
77544+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
77545 static inline int inet_is_reserved_local_port(int port)
77546 {
77547 return test_bit(port, sysctl_local_reserved_ports);
77548diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
77549index e49db91..76a81de 100644
77550--- a/include/net/ip_fib.h
77551+++ b/include/net/ip_fib.h
77552@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
77553
77554 #define FIB_RES_SADDR(net, res) \
77555 ((FIB_RES_NH(res).nh_saddr_genid == \
77556- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
77557+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
77558 FIB_RES_NH(res).nh_saddr : \
77559 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
77560 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
77561diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
77562index 4c062cc..3562c31 100644
77563--- a/include/net/ip_vs.h
77564+++ b/include/net/ip_vs.h
77565@@ -612,7 +612,7 @@ struct ip_vs_conn {
77566 struct ip_vs_conn *control; /* Master control connection */
77567 atomic_t n_control; /* Number of controlled ones */
77568 struct ip_vs_dest *dest; /* real server */
77569- atomic_t in_pkts; /* incoming packet counter */
77570+ atomic_unchecked_t in_pkts; /* incoming packet counter */
77571
77572 /* packet transmitter for different forwarding methods. If it
77573 mangles the packet, it must return NF_DROP or better NF_STOLEN,
77574@@ -761,7 +761,7 @@ struct ip_vs_dest {
77575 __be16 port; /* port number of the server */
77576 union nf_inet_addr addr; /* IP address of the server */
77577 volatile unsigned int flags; /* dest status flags */
77578- atomic_t conn_flags; /* flags to copy to conn */
77579+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
77580 atomic_t weight; /* server weight */
77581
77582 atomic_t refcnt; /* reference counter */
77583@@ -1013,11 +1013,11 @@ struct netns_ipvs {
77584 /* ip_vs_lblc */
77585 int sysctl_lblc_expiration;
77586 struct ctl_table_header *lblc_ctl_header;
77587- struct ctl_table *lblc_ctl_table;
77588+ ctl_table_no_const *lblc_ctl_table;
77589 /* ip_vs_lblcr */
77590 int sysctl_lblcr_expiration;
77591 struct ctl_table_header *lblcr_ctl_header;
77592- struct ctl_table *lblcr_ctl_table;
77593+ ctl_table_no_const *lblcr_ctl_table;
77594 /* ip_vs_est */
77595 struct list_head est_list; /* estimator list */
77596 spinlock_t est_lock;
77597diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
77598index 80ffde3..968b0f4 100644
77599--- a/include/net/irda/ircomm_tty.h
77600+++ b/include/net/irda/ircomm_tty.h
77601@@ -35,6 +35,7 @@
77602 #include <linux/termios.h>
77603 #include <linux/timer.h>
77604 #include <linux/tty.h> /* struct tty_struct */
77605+#include <asm/local.h>
77606
77607 #include <net/irda/irias_object.h>
77608 #include <net/irda/ircomm_core.h>
77609diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
77610index 714cc9a..ea05f3e 100644
77611--- a/include/net/iucv/af_iucv.h
77612+++ b/include/net/iucv/af_iucv.h
77613@@ -149,7 +149,7 @@ struct iucv_skb_cb {
77614 struct iucv_sock_list {
77615 struct hlist_head head;
77616 rwlock_t lock;
77617- atomic_t autobind_name;
77618+ atomic_unchecked_t autobind_name;
77619 };
77620
77621 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
77622diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
77623index df83f69..9b640b8 100644
77624--- a/include/net/llc_c_ac.h
77625+++ b/include/net/llc_c_ac.h
77626@@ -87,7 +87,7 @@
77627 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
77628 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
77629
77630-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
77631+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
77632
77633 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
77634 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
77635diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
77636index 6ca3113..f8026dd 100644
77637--- a/include/net/llc_c_ev.h
77638+++ b/include/net/llc_c_ev.h
77639@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
77640 return (struct llc_conn_state_ev *)skb->cb;
77641 }
77642
77643-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
77644-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
77645+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
77646+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
77647
77648 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
77649 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
77650diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
77651index 0e79cfb..f46db31 100644
77652--- a/include/net/llc_c_st.h
77653+++ b/include/net/llc_c_st.h
77654@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
77655 u8 next_state;
77656 llc_conn_ev_qfyr_t *ev_qualifiers;
77657 llc_conn_action_t *ev_actions;
77658-};
77659+} __do_const;
77660
77661 struct llc_conn_state {
77662 u8 current_state;
77663diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
77664index 37a3bbd..55a4241 100644
77665--- a/include/net/llc_s_ac.h
77666+++ b/include/net/llc_s_ac.h
77667@@ -23,7 +23,7 @@
77668 #define SAP_ACT_TEST_IND 9
77669
77670 /* All action functions must look like this */
77671-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
77672+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
77673
77674 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
77675 struct sk_buff *skb);
77676diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
77677index 567c681..cd73ac0 100644
77678--- a/include/net/llc_s_st.h
77679+++ b/include/net/llc_s_st.h
77680@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
77681 llc_sap_ev_t ev;
77682 u8 next_state;
77683 llc_sap_action_t *ev_actions;
77684-};
77685+} __do_const;
77686
77687 struct llc_sap_state {
77688 u8 curr_state;
77689diff --git a/include/net/mac80211.h b/include/net/mac80211.h
77690index 885898a..cdace34 100644
77691--- a/include/net/mac80211.h
77692+++ b/include/net/mac80211.h
77693@@ -4205,7 +4205,7 @@ struct rate_control_ops {
77694 void (*add_sta_debugfs)(void *priv, void *priv_sta,
77695 struct dentry *dir);
77696 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
77697-};
77698+} __do_const;
77699
77700 static inline int rate_supported(struct ieee80211_sta *sta,
77701 enum ieee80211_band band,
77702diff --git a/include/net/neighbour.h b/include/net/neighbour.h
77703index 7e748ad..5c6229b 100644
77704--- a/include/net/neighbour.h
77705+++ b/include/net/neighbour.h
77706@@ -123,7 +123,7 @@ struct neigh_ops {
77707 void (*error_report)(struct neighbour *, struct sk_buff *);
77708 int (*output)(struct neighbour *, struct sk_buff *);
77709 int (*connected_output)(struct neighbour *, struct sk_buff *);
77710-};
77711+} __do_const;
77712
77713 struct pneigh_entry {
77714 struct pneigh_entry *next;
77715diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
77716index b176978..ea169f4 100644
77717--- a/include/net/net_namespace.h
77718+++ b/include/net/net_namespace.h
77719@@ -117,7 +117,7 @@ struct net {
77720 #endif
77721 struct netns_ipvs *ipvs;
77722 struct sock *diag_nlsk;
77723- atomic_t rt_genid;
77724+ atomic_unchecked_t rt_genid;
77725 };
77726
77727 /*
77728@@ -274,7 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
77729 #define __net_init __init
77730 #define __net_exit __exit_refok
77731 #define __net_initdata __initdata
77732+#ifdef CONSTIFY_PLUGIN
77733 #define __net_initconst __initconst
77734+#else
77735+#define __net_initconst __initdata
77736+#endif
77737 #endif
77738
77739 struct pernet_operations {
77740@@ -284,7 +288,7 @@ struct pernet_operations {
77741 void (*exit_batch)(struct list_head *net_exit_list);
77742 int *id;
77743 size_t size;
77744-};
77745+} __do_const;
77746
77747 /*
77748 * Use these carefully. If you implement a network device and it
77749@@ -332,12 +336,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
77750
77751 static inline int rt_genid(struct net *net)
77752 {
77753- return atomic_read(&net->rt_genid);
77754+ return atomic_read_unchecked(&net->rt_genid);
77755 }
77756
77757 static inline void rt_genid_bump(struct net *net)
77758 {
77759- atomic_inc(&net->rt_genid);
77760+ atomic_inc_unchecked(&net->rt_genid);
77761 }
77762
77763 #endif /* __NET_NET_NAMESPACE_H */
77764diff --git a/include/net/netdma.h b/include/net/netdma.h
77765index 8ba8ce2..99b7fff 100644
77766--- a/include/net/netdma.h
77767+++ b/include/net/netdma.h
77768@@ -24,7 +24,7 @@
77769 #include <linux/dmaengine.h>
77770 #include <linux/skbuff.h>
77771
77772-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
77773+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
77774 struct sk_buff *skb, int offset, struct iovec *to,
77775 size_t len, struct dma_pinned_list *pinned_list);
77776
77777diff --git a/include/net/netlink.h b/include/net/netlink.h
77778index 9690b0f..87aded7 100644
77779--- a/include/net/netlink.h
77780+++ b/include/net/netlink.h
77781@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
77782 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
77783 {
77784 if (mark)
77785- skb_trim(skb, (unsigned char *) mark - skb->data);
77786+ skb_trim(skb, (const unsigned char *) mark - skb->data);
77787 }
77788
77789 /**
77790diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
77791index c9c0c53..53f24c3 100644
77792--- a/include/net/netns/conntrack.h
77793+++ b/include/net/netns/conntrack.h
77794@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
77795 struct nf_proto_net {
77796 #ifdef CONFIG_SYSCTL
77797 struct ctl_table_header *ctl_table_header;
77798- struct ctl_table *ctl_table;
77799+ ctl_table_no_const *ctl_table;
77800 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
77801 struct ctl_table_header *ctl_compat_header;
77802- struct ctl_table *ctl_compat_table;
77803+ ctl_table_no_const *ctl_compat_table;
77804 #endif
77805 #endif
77806 unsigned int users;
77807@@ -58,7 +58,7 @@ struct nf_ip_net {
77808 struct nf_icmp_net icmpv6;
77809 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
77810 struct ctl_table_header *ctl_table_header;
77811- struct ctl_table *ctl_table;
77812+ ctl_table_no_const *ctl_table;
77813 #endif
77814 };
77815
77816diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
77817index 2ba9de8..47bd6c7 100644
77818--- a/include/net/netns/ipv4.h
77819+++ b/include/net/netns/ipv4.h
77820@@ -67,7 +67,7 @@ struct netns_ipv4 {
77821 kgid_t sysctl_ping_group_range[2];
77822 long sysctl_tcp_mem[3];
77823
77824- atomic_t dev_addr_genid;
77825+ atomic_unchecked_t dev_addr_genid;
77826
77827 #ifdef CONFIG_IP_MROUTE
77828 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
77829diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
77830index 005e2c2..023d340 100644
77831--- a/include/net/netns/ipv6.h
77832+++ b/include/net/netns/ipv6.h
77833@@ -71,7 +71,7 @@ struct netns_ipv6 {
77834 struct fib_rules_ops *mr6_rules_ops;
77835 #endif
77836 #endif
77837- atomic_t dev_addr_genid;
77838+ atomic_unchecked_t dev_addr_genid;
77839 };
77840
77841 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
77842diff --git a/include/net/protocol.h b/include/net/protocol.h
77843index 047c047..b9dad15 100644
77844--- a/include/net/protocol.h
77845+++ b/include/net/protocol.h
77846@@ -44,7 +44,7 @@ struct net_protocol {
77847 void (*err_handler)(struct sk_buff *skb, u32 info);
77848 unsigned int no_policy:1,
77849 netns_ok:1;
77850-};
77851+} __do_const;
77852
77853 #if IS_ENABLED(CONFIG_IPV6)
77854 struct inet6_protocol {
77855@@ -57,7 +57,7 @@ struct inet6_protocol {
77856 u8 type, u8 code, int offset,
77857 __be32 info);
77858 unsigned int flags; /* INET6_PROTO_xxx */
77859-};
77860+} __do_const;
77861
77862 #define INET6_PROTO_NOPOLICY 0x1
77863 #define INET6_PROTO_FINAL 0x2
77864diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
77865index 7026648..584cc8c 100644
77866--- a/include/net/rtnetlink.h
77867+++ b/include/net/rtnetlink.h
77868@@ -81,7 +81,7 @@ struct rtnl_link_ops {
77869 const struct net_device *dev);
77870 unsigned int (*get_num_tx_queues)(void);
77871 unsigned int (*get_num_rx_queues)(void);
77872-};
77873+} __do_const;
77874
77875 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
77876 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
77877diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
77878index cd89510..d67810f 100644
77879--- a/include/net/sctp/sctp.h
77880+++ b/include/net/sctp/sctp.h
77881@@ -330,9 +330,9 @@ do { \
77882
77883 #else /* SCTP_DEBUG */
77884
77885-#define SCTP_DEBUG_PRINTK(whatever...)
77886-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
77887-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
77888+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
77889+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
77890+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
77891 #define SCTP_ENABLE_DEBUG
77892 #define SCTP_DISABLE_DEBUG
77893 #define SCTP_ASSERT(expr, str, func)
77894diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
77895index 2a82d13..62a31c2 100644
77896--- a/include/net/sctp/sm.h
77897+++ b/include/net/sctp/sm.h
77898@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
77899 typedef struct {
77900 sctp_state_fn_t *fn;
77901 const char *name;
77902-} sctp_sm_table_entry_t;
77903+} __do_const sctp_sm_table_entry_t;
77904
77905 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
77906 * currently in use.
77907@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
77908 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
77909
77910 /* Extern declarations for major data structures. */
77911-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
77912+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
77913
77914
77915 /* Get the size of a DATA chunk payload. */
77916diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
77917index 1bd4c41..9250b5b 100644
77918--- a/include/net/sctp/structs.h
77919+++ b/include/net/sctp/structs.h
77920@@ -516,7 +516,7 @@ struct sctp_pf {
77921 struct sctp_association *asoc);
77922 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
77923 struct sctp_af *af;
77924-};
77925+} __do_const;
77926
77927
77928 /* Structure to track chunk fragments that have been acked, but peer
77929diff --git a/include/net/sock.h b/include/net/sock.h
77930index 66772cf..25bc45b 100644
77931--- a/include/net/sock.h
77932+++ b/include/net/sock.h
77933@@ -325,7 +325,7 @@ struct sock {
77934 #ifdef CONFIG_RPS
77935 __u32 sk_rxhash;
77936 #endif
77937- atomic_t sk_drops;
77938+ atomic_unchecked_t sk_drops;
77939 int sk_rcvbuf;
77940
77941 struct sk_filter __rcu *sk_filter;
77942@@ -1797,7 +1797,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
77943 }
77944
77945 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
77946- char __user *from, char *to,
77947+ char __user *from, unsigned char *to,
77948 int copy, int offset)
77949 {
77950 if (skb->ip_summed == CHECKSUM_NONE) {
77951@@ -2056,7 +2056,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
77952 }
77953 }
77954
77955-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
77956+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
77957
77958 /**
77959 * sk_page_frag - return an appropriate page_frag
77960diff --git a/include/net/tcp.h b/include/net/tcp.h
77961index 5bba80f..8520a82 100644
77962--- a/include/net/tcp.h
77963+++ b/include/net/tcp.h
77964@@ -524,7 +524,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
77965 extern void tcp_xmit_retransmit_queue(struct sock *);
77966 extern void tcp_simple_retransmit(struct sock *);
77967 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
77968-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
77969+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
77970
77971 extern void tcp_send_probe0(struct sock *);
77972 extern void tcp_send_partial(struct sock *);
77973@@ -697,8 +697,8 @@ struct tcp_skb_cb {
77974 struct inet6_skb_parm h6;
77975 #endif
77976 } header; /* For incoming frames */
77977- __u32 seq; /* Starting sequence number */
77978- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
77979+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
77980+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
77981 __u32 when; /* used to compute rtt's */
77982 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
77983
77984@@ -712,7 +712,7 @@ struct tcp_skb_cb {
77985
77986 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
77987 /* 1 byte hole */
77988- __u32 ack_seq; /* Sequence number ACK'd */
77989+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
77990 };
77991
77992 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
77993diff --git a/include/net/xfrm.h b/include/net/xfrm.h
77994index 94ce082..62b278d 100644
77995--- a/include/net/xfrm.h
77996+++ b/include/net/xfrm.h
77997@@ -305,7 +305,7 @@ struct xfrm_policy_afinfo {
77998 struct net_device *dev,
77999 const struct flowi *fl);
78000 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
78001-};
78002+} __do_const;
78003
78004 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
78005 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
78006@@ -341,7 +341,7 @@ struct xfrm_state_afinfo {
78007 struct sk_buff *skb);
78008 int (*transport_finish)(struct sk_buff *skb,
78009 int async);
78010-};
78011+} __do_const;
78012
78013 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
78014 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
78015@@ -424,7 +424,7 @@ struct xfrm_mode {
78016 struct module *owner;
78017 unsigned int encap;
78018 int flags;
78019-};
78020+} __do_const;
78021
78022 /* Flags for xfrm_mode. */
78023 enum {
78024@@ -521,7 +521,7 @@ struct xfrm_policy {
78025 struct timer_list timer;
78026
78027 struct flow_cache_object flo;
78028- atomic_t genid;
78029+ atomic_unchecked_t genid;
78030 u32 priority;
78031 u32 index;
78032 struct xfrm_mark mark;
78033diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
78034index 1a046b1..ee0bef0 100644
78035--- a/include/rdma/iw_cm.h
78036+++ b/include/rdma/iw_cm.h
78037@@ -122,7 +122,7 @@ struct iw_cm_verbs {
78038 int backlog);
78039
78040 int (*destroy_listen)(struct iw_cm_id *cm_id);
78041-};
78042+} __no_const;
78043
78044 /**
78045 * iw_create_cm_id - Create an IW CM identifier.
78046diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
78047index e1379b4..67eafbe 100644
78048--- a/include/scsi/libfc.h
78049+++ b/include/scsi/libfc.h
78050@@ -762,6 +762,7 @@ struct libfc_function_template {
78051 */
78052 void (*disc_stop_final) (struct fc_lport *);
78053 };
78054+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
78055
78056 /**
78057 * struct fc_disc - Discovery context
78058@@ -866,7 +867,7 @@ struct fc_lport {
78059 struct fc_vport *vport;
78060
78061 /* Operational Information */
78062- struct libfc_function_template tt;
78063+ libfc_function_template_no_const tt;
78064 u8 link_up;
78065 u8 qfull;
78066 enum fc_lport_state state;
78067diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
78068index cc64587..608f523 100644
78069--- a/include/scsi/scsi_device.h
78070+++ b/include/scsi/scsi_device.h
78071@@ -171,9 +171,9 @@ struct scsi_device {
78072 unsigned int max_device_blocked; /* what device_blocked counts down from */
78073 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
78074
78075- atomic_t iorequest_cnt;
78076- atomic_t iodone_cnt;
78077- atomic_t ioerr_cnt;
78078+ atomic_unchecked_t iorequest_cnt;
78079+ atomic_unchecked_t iodone_cnt;
78080+ atomic_unchecked_t ioerr_cnt;
78081
78082 struct device sdev_gendev,
78083 sdev_dev;
78084diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
78085index b797e8f..8e2c3aa 100644
78086--- a/include/scsi/scsi_transport_fc.h
78087+++ b/include/scsi/scsi_transport_fc.h
78088@@ -751,7 +751,8 @@ struct fc_function_template {
78089 unsigned long show_host_system_hostname:1;
78090
78091 unsigned long disable_target_scan:1;
78092-};
78093+} __do_const;
78094+typedef struct fc_function_template __no_const fc_function_template_no_const;
78095
78096
78097 /**
78098diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
78099index 9031a26..750d592 100644
78100--- a/include/sound/compress_driver.h
78101+++ b/include/sound/compress_driver.h
78102@@ -128,7 +128,7 @@ struct snd_compr_ops {
78103 struct snd_compr_caps *caps);
78104 int (*get_codec_caps) (struct snd_compr_stream *stream,
78105 struct snd_compr_codec_caps *codec);
78106-};
78107+} __no_const;
78108
78109 /**
78110 * struct snd_compr: Compressed device
78111diff --git a/include/sound/soc.h b/include/sound/soc.h
78112index 85c1522..f44bad1 100644
78113--- a/include/sound/soc.h
78114+++ b/include/sound/soc.h
78115@@ -781,7 +781,7 @@ struct snd_soc_codec_driver {
78116 /* probe ordering - for components with runtime dependencies */
78117 int probe_order;
78118 int remove_order;
78119-};
78120+} __do_const;
78121
78122 /* SoC platform interface */
78123 struct snd_soc_platform_driver {
78124@@ -827,7 +827,7 @@ struct snd_soc_platform_driver {
78125 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
78126 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
78127 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
78128-};
78129+} __do_const;
78130
78131 struct snd_soc_platform {
78132 const char *name;
78133diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
78134index 4ea4f98..a63629b 100644
78135--- a/include/target/target_core_base.h
78136+++ b/include/target/target_core_base.h
78137@@ -653,7 +653,7 @@ struct se_device {
78138 spinlock_t stats_lock;
78139 /* Active commands on this virtual SE device */
78140 atomic_t simple_cmds;
78141- atomic_t dev_ordered_id;
78142+ atomic_unchecked_t dev_ordered_id;
78143 atomic_t dev_ordered_sync;
78144 atomic_t dev_qf_count;
78145 int export_count;
78146diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
78147new file mode 100644
78148index 0000000..fb634b7
78149--- /dev/null
78150+++ b/include/trace/events/fs.h
78151@@ -0,0 +1,53 @@
78152+#undef TRACE_SYSTEM
78153+#define TRACE_SYSTEM fs
78154+
78155+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
78156+#define _TRACE_FS_H
78157+
78158+#include <linux/fs.h>
78159+#include <linux/tracepoint.h>
78160+
78161+TRACE_EVENT(do_sys_open,
78162+
78163+ TP_PROTO(const char *filename, int flags, int mode),
78164+
78165+ TP_ARGS(filename, flags, mode),
78166+
78167+ TP_STRUCT__entry(
78168+ __string( filename, filename )
78169+ __field( int, flags )
78170+ __field( int, mode )
78171+ ),
78172+
78173+ TP_fast_assign(
78174+ __assign_str(filename, filename);
78175+ __entry->flags = flags;
78176+ __entry->mode = mode;
78177+ ),
78178+
78179+ TP_printk("\"%s\" %x %o",
78180+ __get_str(filename), __entry->flags, __entry->mode)
78181+);
78182+
78183+TRACE_EVENT(open_exec,
78184+
78185+ TP_PROTO(const char *filename),
78186+
78187+ TP_ARGS(filename),
78188+
78189+ TP_STRUCT__entry(
78190+ __string( filename, filename )
78191+ ),
78192+
78193+ TP_fast_assign(
78194+ __assign_str(filename, filename);
78195+ ),
78196+
78197+ TP_printk("\"%s\"",
78198+ __get_str(filename))
78199+);
78200+
78201+#endif /* _TRACE_FS_H */
78202+
78203+/* This part must be outside protection */
78204+#include <trace/define_trace.h>
78205diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
78206index 1c09820..7f5ec79 100644
78207--- a/include/trace/events/irq.h
78208+++ b/include/trace/events/irq.h
78209@@ -36,7 +36,7 @@ struct softirq_action;
78210 */
78211 TRACE_EVENT(irq_handler_entry,
78212
78213- TP_PROTO(int irq, struct irqaction *action),
78214+ TP_PROTO(int irq, const struct irqaction *action),
78215
78216 TP_ARGS(irq, action),
78217
78218@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
78219 */
78220 TRACE_EVENT(irq_handler_exit,
78221
78222- TP_PROTO(int irq, struct irqaction *action, int ret),
78223+ TP_PROTO(int irq, const struct irqaction *action, int ret),
78224
78225 TP_ARGS(irq, action, ret),
78226
78227diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
78228index 7caf44c..23c6f27 100644
78229--- a/include/uapi/linux/a.out.h
78230+++ b/include/uapi/linux/a.out.h
78231@@ -39,6 +39,14 @@ enum machine_type {
78232 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
78233 };
78234
78235+/* Constants for the N_FLAGS field */
78236+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
78237+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
78238+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
78239+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
78240+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
78241+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
78242+
78243 #if !defined (N_MAGIC)
78244 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
78245 #endif
78246diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
78247index d876736..ccce5c0 100644
78248--- a/include/uapi/linux/byteorder/little_endian.h
78249+++ b/include/uapi/linux/byteorder/little_endian.h
78250@@ -42,51 +42,51 @@
78251
78252 static inline __le64 __cpu_to_le64p(const __u64 *p)
78253 {
78254- return (__force __le64)*p;
78255+ return (__force const __le64)*p;
78256 }
78257-static inline __u64 __le64_to_cpup(const __le64 *p)
78258+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
78259 {
78260- return (__force __u64)*p;
78261+ return (__force const __u64)*p;
78262 }
78263 static inline __le32 __cpu_to_le32p(const __u32 *p)
78264 {
78265- return (__force __le32)*p;
78266+ return (__force const __le32)*p;
78267 }
78268 static inline __u32 __le32_to_cpup(const __le32 *p)
78269 {
78270- return (__force __u32)*p;
78271+ return (__force const __u32)*p;
78272 }
78273 static inline __le16 __cpu_to_le16p(const __u16 *p)
78274 {
78275- return (__force __le16)*p;
78276+ return (__force const __le16)*p;
78277 }
78278 static inline __u16 __le16_to_cpup(const __le16 *p)
78279 {
78280- return (__force __u16)*p;
78281+ return (__force const __u16)*p;
78282 }
78283 static inline __be64 __cpu_to_be64p(const __u64 *p)
78284 {
78285- return (__force __be64)__swab64p(p);
78286+ return (__force const __be64)__swab64p(p);
78287 }
78288 static inline __u64 __be64_to_cpup(const __be64 *p)
78289 {
78290- return __swab64p((__u64 *)p);
78291+ return __swab64p((const __u64 *)p);
78292 }
78293 static inline __be32 __cpu_to_be32p(const __u32 *p)
78294 {
78295- return (__force __be32)__swab32p(p);
78296+ return (__force const __be32)__swab32p(p);
78297 }
78298-static inline __u32 __be32_to_cpup(const __be32 *p)
78299+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
78300 {
78301- return __swab32p((__u32 *)p);
78302+ return __swab32p((const __u32 *)p);
78303 }
78304 static inline __be16 __cpu_to_be16p(const __u16 *p)
78305 {
78306- return (__force __be16)__swab16p(p);
78307+ return (__force const __be16)__swab16p(p);
78308 }
78309 static inline __u16 __be16_to_cpup(const __be16 *p)
78310 {
78311- return __swab16p((__u16 *)p);
78312+ return __swab16p((const __u16 *)p);
78313 }
78314 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
78315 #define __le64_to_cpus(x) do { (void)(x); } while (0)
78316diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
78317index ef6103b..d4e65dd 100644
78318--- a/include/uapi/linux/elf.h
78319+++ b/include/uapi/linux/elf.h
78320@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
78321 #define PT_GNU_EH_FRAME 0x6474e550
78322
78323 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
78324+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
78325+
78326+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
78327+
78328+/* Constants for the e_flags field */
78329+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
78330+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
78331+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
78332+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
78333+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
78334+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
78335
78336 /*
78337 * Extended Numbering
78338@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
78339 #define DT_DEBUG 21
78340 #define DT_TEXTREL 22
78341 #define DT_JMPREL 23
78342+#define DT_FLAGS 30
78343+ #define DF_TEXTREL 0x00000004
78344 #define DT_ENCODING 32
78345 #define OLD_DT_LOOS 0x60000000
78346 #define DT_LOOS 0x6000000d
78347@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
78348 #define PF_W 0x2
78349 #define PF_X 0x1
78350
78351+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
78352+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
78353+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
78354+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
78355+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
78356+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
78357+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
78358+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
78359+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
78360+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
78361+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
78362+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
78363+
78364 typedef struct elf32_phdr{
78365 Elf32_Word p_type;
78366 Elf32_Off p_offset;
78367@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
78368 #define EI_OSABI 7
78369 #define EI_PAD 8
78370
78371+#define EI_PAX 14
78372+
78373 #define ELFMAG0 0x7f /* EI_MAG */
78374 #define ELFMAG1 'E'
78375 #define ELFMAG2 'L'
78376diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
78377index aa169c4..6a2771d 100644
78378--- a/include/uapi/linux/personality.h
78379+++ b/include/uapi/linux/personality.h
78380@@ -30,6 +30,7 @@ enum {
78381 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
78382 ADDR_NO_RANDOMIZE | \
78383 ADDR_COMPAT_LAYOUT | \
78384+ ADDR_LIMIT_3GB | \
78385 MMAP_PAGE_ZERO)
78386
78387 /*
78388diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
78389index 7530e74..e714828 100644
78390--- a/include/uapi/linux/screen_info.h
78391+++ b/include/uapi/linux/screen_info.h
78392@@ -43,7 +43,8 @@ struct screen_info {
78393 __u16 pages; /* 0x32 */
78394 __u16 vesa_attributes; /* 0x34 */
78395 __u32 capabilities; /* 0x36 */
78396- __u8 _reserved[6]; /* 0x3a */
78397+ __u16 vesapm_size; /* 0x3a */
78398+ __u8 _reserved[4]; /* 0x3c */
78399 } __attribute__((packed));
78400
78401 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
78402diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
78403index 0e011eb..82681b1 100644
78404--- a/include/uapi/linux/swab.h
78405+++ b/include/uapi/linux/swab.h
78406@@ -43,7 +43,7 @@
78407 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
78408 */
78409
78410-static inline __attribute_const__ __u16 __fswab16(__u16 val)
78411+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
78412 {
78413 #ifdef __HAVE_BUILTIN_BSWAP16__
78414 return __builtin_bswap16(val);
78415@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
78416 #endif
78417 }
78418
78419-static inline __attribute_const__ __u32 __fswab32(__u32 val)
78420+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
78421 {
78422 #ifdef __HAVE_BUILTIN_BSWAP32__
78423 return __builtin_bswap32(val);
78424@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
78425 #endif
78426 }
78427
78428-static inline __attribute_const__ __u64 __fswab64(__u64 val)
78429+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
78430 {
78431 #ifdef __HAVE_BUILTIN_BSWAP64__
78432 return __builtin_bswap64(val);
78433diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
78434index 6d67213..8dab561 100644
78435--- a/include/uapi/linux/sysctl.h
78436+++ b/include/uapi/linux/sysctl.h
78437@@ -155,7 +155,11 @@ enum
78438 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
78439 };
78440
78441-
78442+#ifdef CONFIG_PAX_SOFTMODE
78443+enum {
78444+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
78445+};
78446+#endif
78447
78448 /* CTL_VM names: */
78449 enum
78450diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
78451index e4629b9..6958086 100644
78452--- a/include/uapi/linux/xattr.h
78453+++ b/include/uapi/linux/xattr.h
78454@@ -63,5 +63,9 @@
78455 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
78456 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
78457
78458+/* User namespace */
78459+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
78460+#define XATTR_PAX_FLAGS_SUFFIX "flags"
78461+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
78462
78463 #endif /* _UAPI_LINUX_XATTR_H */
78464diff --git a/include/video/udlfb.h b/include/video/udlfb.h
78465index f9466fa..f4e2b81 100644
78466--- a/include/video/udlfb.h
78467+++ b/include/video/udlfb.h
78468@@ -53,10 +53,10 @@ struct dlfb_data {
78469 u32 pseudo_palette[256];
78470 int blank_mode; /*one of FB_BLANK_ */
78471 /* blit-only rendering path metrics, exposed through sysfs */
78472- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
78473- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
78474- atomic_t bytes_sent; /* to usb, after compression including overhead */
78475- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
78476+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
78477+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
78478+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
78479+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
78480 };
78481
78482 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
78483diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
78484index 1a91850..28573f8 100644
78485--- a/include/video/uvesafb.h
78486+++ b/include/video/uvesafb.h
78487@@ -122,6 +122,7 @@ struct uvesafb_par {
78488 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
78489 u8 pmi_setpal; /* PMI for palette changes */
78490 u16 *pmi_base; /* protected mode interface location */
78491+ u8 *pmi_code; /* protected mode code location */
78492 void *pmi_start;
78493 void *pmi_pal;
78494 u8 *vbe_state_orig; /*
78495diff --git a/init/Kconfig b/init/Kconfig
78496index 2d9b831..ae4c8ac 100644
78497--- a/init/Kconfig
78498+++ b/init/Kconfig
78499@@ -1029,6 +1029,7 @@ endif # CGROUPS
78500
78501 config CHECKPOINT_RESTORE
78502 bool "Checkpoint/restore support" if EXPERT
78503+ depends on !GRKERNSEC
78504 default n
78505 help
78506 Enables additional kernel features in a sake of checkpoint/restore.
78507@@ -1516,7 +1517,7 @@ config SLUB_DEBUG
78508
78509 config COMPAT_BRK
78510 bool "Disable heap randomization"
78511- default y
78512+ default n
78513 help
78514 Randomizing heap placement makes heap exploits harder, but it
78515 also breaks ancient binaries (including anything libc5 based).
78516@@ -1779,7 +1780,7 @@ config INIT_ALL_POSSIBLE
78517 config STOP_MACHINE
78518 bool
78519 default y
78520- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
78521+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
78522 help
78523 Need stop_machine() primitive.
78524
78525diff --git a/init/Makefile b/init/Makefile
78526index 7bc47ee..6da2dc7 100644
78527--- a/init/Makefile
78528+++ b/init/Makefile
78529@@ -2,6 +2,9 @@
78530 # Makefile for the linux kernel.
78531 #
78532
78533+ccflags-y := $(GCC_PLUGINS_CFLAGS)
78534+asflags-y := $(GCC_PLUGINS_AFLAGS)
78535+
78536 obj-y := main.o version.o mounts.o
78537 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
78538 obj-y += noinitramfs.o
78539diff --git a/init/do_mounts.c b/init/do_mounts.c
78540index a2b49f2..03a0e17c 100644
78541--- a/init/do_mounts.c
78542+++ b/init/do_mounts.c
78543@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
78544 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
78545 {
78546 struct super_block *s;
78547- int err = sys_mount(name, "/root", fs, flags, data);
78548+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
78549 if (err)
78550 return err;
78551
78552- sys_chdir("/root");
78553+ sys_chdir((const char __force_user *)"/root");
78554 s = current->fs->pwd.dentry->d_sb;
78555 ROOT_DEV = s->s_dev;
78556 printk(KERN_INFO
78557@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
78558 va_start(args, fmt);
78559 vsprintf(buf, fmt, args);
78560 va_end(args);
78561- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
78562+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
78563 if (fd >= 0) {
78564 sys_ioctl(fd, FDEJECT, 0);
78565 sys_close(fd);
78566 }
78567 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
78568- fd = sys_open("/dev/console", O_RDWR, 0);
78569+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
78570 if (fd >= 0) {
78571 sys_ioctl(fd, TCGETS, (long)&termios);
78572 termios.c_lflag &= ~ICANON;
78573 sys_ioctl(fd, TCSETSF, (long)&termios);
78574- sys_read(fd, &c, 1);
78575+ sys_read(fd, (char __user *)&c, 1);
78576 termios.c_lflag |= ICANON;
78577 sys_ioctl(fd, TCSETSF, (long)&termios);
78578 sys_close(fd);
78579@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
78580 mount_root();
78581 out:
78582 devtmpfs_mount("dev");
78583- sys_mount(".", "/", NULL, MS_MOVE, NULL);
78584- sys_chroot(".");
78585+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
78586+ sys_chroot((const char __force_user *)".");
78587 }
78588diff --git a/init/do_mounts.h b/init/do_mounts.h
78589index f5b978a..69dbfe8 100644
78590--- a/init/do_mounts.h
78591+++ b/init/do_mounts.h
78592@@ -15,15 +15,15 @@ extern int root_mountflags;
78593
78594 static inline int create_dev(char *name, dev_t dev)
78595 {
78596- sys_unlink(name);
78597- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
78598+ sys_unlink((char __force_user *)name);
78599+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
78600 }
78601
78602 #if BITS_PER_LONG == 32
78603 static inline u32 bstat(char *name)
78604 {
78605 struct stat64 stat;
78606- if (sys_stat64(name, &stat) != 0)
78607+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
78608 return 0;
78609 if (!S_ISBLK(stat.st_mode))
78610 return 0;
78611@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
78612 static inline u32 bstat(char *name)
78613 {
78614 struct stat stat;
78615- if (sys_newstat(name, &stat) != 0)
78616+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
78617 return 0;
78618 if (!S_ISBLK(stat.st_mode))
78619 return 0;
78620diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
78621index 3e0878e..8a9d7a0 100644
78622--- a/init/do_mounts_initrd.c
78623+++ b/init/do_mounts_initrd.c
78624@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
78625 {
78626 sys_unshare(CLONE_FS | CLONE_FILES);
78627 /* stdin/stdout/stderr for /linuxrc */
78628- sys_open("/dev/console", O_RDWR, 0);
78629+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
78630 sys_dup(0);
78631 sys_dup(0);
78632 /* move initrd over / and chdir/chroot in initrd root */
78633- sys_chdir("/root");
78634- sys_mount(".", "/", NULL, MS_MOVE, NULL);
78635- sys_chroot(".");
78636+ sys_chdir((const char __force_user *)"/root");
78637+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
78638+ sys_chroot((const char __force_user *)".");
78639 sys_setsid();
78640 return 0;
78641 }
78642@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
78643 create_dev("/dev/root.old", Root_RAM0);
78644 /* mount initrd on rootfs' /root */
78645 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
78646- sys_mkdir("/old", 0700);
78647- sys_chdir("/old");
78648+ sys_mkdir((const char __force_user *)"/old", 0700);
78649+ sys_chdir((const char __force_user *)"/old");
78650
78651 /* try loading default modules from initrd */
78652 load_default_modules();
78653@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
78654 current->flags &= ~PF_FREEZER_SKIP;
78655
78656 /* move initrd to rootfs' /old */
78657- sys_mount("..", ".", NULL, MS_MOVE, NULL);
78658+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
78659 /* switch root and cwd back to / of rootfs */
78660- sys_chroot("..");
78661+ sys_chroot((const char __force_user *)"..");
78662
78663 if (new_decode_dev(real_root_dev) == Root_RAM0) {
78664- sys_chdir("/old");
78665+ sys_chdir((const char __force_user *)"/old");
78666 return;
78667 }
78668
78669- sys_chdir("/");
78670+ sys_chdir((const char __force_user *)"/");
78671 ROOT_DEV = new_decode_dev(real_root_dev);
78672 mount_root();
78673
78674 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
78675- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
78676+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
78677 if (!error)
78678 printk("okay\n");
78679 else {
78680- int fd = sys_open("/dev/root.old", O_RDWR, 0);
78681+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
78682 if (error == -ENOENT)
78683 printk("/initrd does not exist. Ignored.\n");
78684 else
78685 printk("failed\n");
78686 printk(KERN_NOTICE "Unmounting old root\n");
78687- sys_umount("/old", MNT_DETACH);
78688+ sys_umount((char __force_user *)"/old", MNT_DETACH);
78689 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
78690 if (fd < 0) {
78691 error = fd;
78692@@ -127,11 +127,11 @@ int __init initrd_load(void)
78693 * mounted in the normal path.
78694 */
78695 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
78696- sys_unlink("/initrd.image");
78697+ sys_unlink((const char __force_user *)"/initrd.image");
78698 handle_initrd();
78699 return 1;
78700 }
78701 }
78702- sys_unlink("/initrd.image");
78703+ sys_unlink((const char __force_user *)"/initrd.image");
78704 return 0;
78705 }
78706diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
78707index 8cb6db5..d729f50 100644
78708--- a/init/do_mounts_md.c
78709+++ b/init/do_mounts_md.c
78710@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
78711 partitioned ? "_d" : "", minor,
78712 md_setup_args[ent].device_names);
78713
78714- fd = sys_open(name, 0, 0);
78715+ fd = sys_open((char __force_user *)name, 0, 0);
78716 if (fd < 0) {
78717 printk(KERN_ERR "md: open failed - cannot start "
78718 "array %s\n", name);
78719@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
78720 * array without it
78721 */
78722 sys_close(fd);
78723- fd = sys_open(name, 0, 0);
78724+ fd = sys_open((char __force_user *)name, 0, 0);
78725 sys_ioctl(fd, BLKRRPART, 0);
78726 }
78727 sys_close(fd);
78728@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
78729
78730 wait_for_device_probe();
78731
78732- fd = sys_open("/dev/md0", 0, 0);
78733+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
78734 if (fd >= 0) {
78735 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
78736 sys_close(fd);
78737diff --git a/init/init_task.c b/init/init_task.c
78738index ba0a7f36..2bcf1d5 100644
78739--- a/init/init_task.c
78740+++ b/init/init_task.c
78741@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
78742 * Initial thread structure. Alignment of this is handled by a special
78743 * linker map entry.
78744 */
78745+#ifdef CONFIG_X86
78746+union thread_union init_thread_union __init_task_data;
78747+#else
78748 union thread_union init_thread_union __init_task_data =
78749 { INIT_THREAD_INFO(init_task) };
78750+#endif
78751diff --git a/init/initramfs.c b/init/initramfs.c
78752index a67ef9d..2d17ed9 100644
78753--- a/init/initramfs.c
78754+++ b/init/initramfs.c
78755@@ -84,7 +84,7 @@ static void __init free_hash(void)
78756 }
78757 }
78758
78759-static long __init do_utime(char *filename, time_t mtime)
78760+static long __init do_utime(char __force_user *filename, time_t mtime)
78761 {
78762 struct timespec t[2];
78763
78764@@ -119,7 +119,7 @@ static void __init dir_utime(void)
78765 struct dir_entry *de, *tmp;
78766 list_for_each_entry_safe(de, tmp, &dir_list, list) {
78767 list_del(&de->list);
78768- do_utime(de->name, de->mtime);
78769+ do_utime((char __force_user *)de->name, de->mtime);
78770 kfree(de->name);
78771 kfree(de);
78772 }
78773@@ -281,7 +281,7 @@ static int __init maybe_link(void)
78774 if (nlink >= 2) {
78775 char *old = find_link(major, minor, ino, mode, collected);
78776 if (old)
78777- return (sys_link(old, collected) < 0) ? -1 : 1;
78778+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
78779 }
78780 return 0;
78781 }
78782@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
78783 {
78784 struct stat st;
78785
78786- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
78787+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
78788 if (S_ISDIR(st.st_mode))
78789- sys_rmdir(path);
78790+ sys_rmdir((char __force_user *)path);
78791 else
78792- sys_unlink(path);
78793+ sys_unlink((char __force_user *)path);
78794 }
78795 }
78796
78797@@ -315,7 +315,7 @@ static int __init do_name(void)
78798 int openflags = O_WRONLY|O_CREAT;
78799 if (ml != 1)
78800 openflags |= O_TRUNC;
78801- wfd = sys_open(collected, openflags, mode);
78802+ wfd = sys_open((char __force_user *)collected, openflags, mode);
78803
78804 if (wfd >= 0) {
78805 sys_fchown(wfd, uid, gid);
78806@@ -327,17 +327,17 @@ static int __init do_name(void)
78807 }
78808 }
78809 } else if (S_ISDIR(mode)) {
78810- sys_mkdir(collected, mode);
78811- sys_chown(collected, uid, gid);
78812- sys_chmod(collected, mode);
78813+ sys_mkdir((char __force_user *)collected, mode);
78814+ sys_chown((char __force_user *)collected, uid, gid);
78815+ sys_chmod((char __force_user *)collected, mode);
78816 dir_add(collected, mtime);
78817 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
78818 S_ISFIFO(mode) || S_ISSOCK(mode)) {
78819 if (maybe_link() == 0) {
78820- sys_mknod(collected, mode, rdev);
78821- sys_chown(collected, uid, gid);
78822- sys_chmod(collected, mode);
78823- do_utime(collected, mtime);
78824+ sys_mknod((char __force_user *)collected, mode, rdev);
78825+ sys_chown((char __force_user *)collected, uid, gid);
78826+ sys_chmod((char __force_user *)collected, mode);
78827+ do_utime((char __force_user *)collected, mtime);
78828 }
78829 }
78830 return 0;
78831@@ -346,15 +346,15 @@ static int __init do_name(void)
78832 static int __init do_copy(void)
78833 {
78834 if (count >= body_len) {
78835- sys_write(wfd, victim, body_len);
78836+ sys_write(wfd, (char __force_user *)victim, body_len);
78837 sys_close(wfd);
78838- do_utime(vcollected, mtime);
78839+ do_utime((char __force_user *)vcollected, mtime);
78840 kfree(vcollected);
78841 eat(body_len);
78842 state = SkipIt;
78843 return 0;
78844 } else {
78845- sys_write(wfd, victim, count);
78846+ sys_write(wfd, (char __force_user *)victim, count);
78847 body_len -= count;
78848 eat(count);
78849 return 1;
78850@@ -365,9 +365,9 @@ static int __init do_symlink(void)
78851 {
78852 collected[N_ALIGN(name_len) + body_len] = '\0';
78853 clean_path(collected, 0);
78854- sys_symlink(collected + N_ALIGN(name_len), collected);
78855- sys_lchown(collected, uid, gid);
78856- do_utime(collected, mtime);
78857+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
78858+ sys_lchown((char __force_user *)collected, uid, gid);
78859+ do_utime((char __force_user *)collected, mtime);
78860 state = SkipIt;
78861 next_state = Reset;
78862 return 0;
78863@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
78864 {
78865 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
78866 if (err)
78867- panic(err); /* Failed to decompress INTERNAL initramfs */
78868+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
78869 if (initrd_start) {
78870 #ifdef CONFIG_BLK_DEV_RAM
78871 int fd;
78872diff --git a/init/main.c b/init/main.c
78873index 9484f4b..0eac7c3 100644
78874--- a/init/main.c
78875+++ b/init/main.c
78876@@ -100,6 +100,8 @@ static inline void mark_rodata_ro(void) { }
78877 extern void tc_init(void);
78878 #endif
78879
78880+extern void grsecurity_init(void);
78881+
78882 /*
78883 * Debug helper: via this flag we know that we are in 'early bootup code'
78884 * where only the boot processor is running with IRQ disabled. This means
78885@@ -153,6 +155,74 @@ static int __init set_reset_devices(char *str)
78886
78887 __setup("reset_devices", set_reset_devices);
78888
78889+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
78890+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
78891+static int __init setup_grsec_proc_gid(char *str)
78892+{
78893+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
78894+ return 1;
78895+}
78896+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
78897+#endif
78898+
78899+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
78900+unsigned long pax_user_shadow_base __read_only;
78901+EXPORT_SYMBOL(pax_user_shadow_base);
78902+extern char pax_enter_kernel_user[];
78903+extern char pax_exit_kernel_user[];
78904+#endif
78905+
78906+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
78907+static int __init setup_pax_nouderef(char *str)
78908+{
78909+#ifdef CONFIG_X86_32
78910+ unsigned int cpu;
78911+ struct desc_struct *gdt;
78912+
78913+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
78914+ gdt = get_cpu_gdt_table(cpu);
78915+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
78916+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
78917+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
78918+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
78919+ }
78920+ loadsegment(ds, __KERNEL_DS);
78921+ loadsegment(es, __KERNEL_DS);
78922+ loadsegment(ss, __KERNEL_DS);
78923+#else
78924+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
78925+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
78926+ clone_pgd_mask = ~(pgdval_t)0UL;
78927+ pax_user_shadow_base = 0UL;
78928+ setup_clear_cpu_cap(X86_FEATURE_PCID);
78929+#endif
78930+
78931+ return 0;
78932+}
78933+early_param("pax_nouderef", setup_pax_nouderef);
78934+
78935+#ifdef CONFIG_X86_64
78936+static int __init setup_pax_weakuderef(char *str)
78937+{
78938+ if (clone_pgd_mask != ~(pgdval_t)0UL)
78939+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
78940+ return 1;
78941+}
78942+__setup("pax_weakuderef", setup_pax_weakuderef);
78943+#endif
78944+#endif
78945+
78946+#ifdef CONFIG_PAX_SOFTMODE
78947+int pax_softmode;
78948+
78949+static int __init setup_pax_softmode(char *str)
78950+{
78951+ get_option(&str, &pax_softmode);
78952+ return 1;
78953+}
78954+__setup("pax_softmode=", setup_pax_softmode);
78955+#endif
78956+
78957 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
78958 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
78959 static const char *panic_later, *panic_param;
78960@@ -655,8 +725,6 @@ static void __init do_ctors(void)
78961 bool initcall_debug;
78962 core_param(initcall_debug, initcall_debug, bool, 0644);
78963
78964-static char msgbuf[64];
78965-
78966 static int __init_or_module do_one_initcall_debug(initcall_t fn)
78967 {
78968 ktime_t calltime, delta, rettime;
78969@@ -679,23 +747,22 @@ int __init_or_module do_one_initcall(initcall_t fn)
78970 {
78971 int count = preempt_count();
78972 int ret;
78973+ const char *msg1 = "", *msg2 = "";
78974
78975 if (initcall_debug)
78976 ret = do_one_initcall_debug(fn);
78977 else
78978 ret = fn();
78979
78980- msgbuf[0] = 0;
78981-
78982 if (preempt_count() != count) {
78983- sprintf(msgbuf, "preemption imbalance ");
78984+ msg1 = " preemption imbalance";
78985 preempt_count() = count;
78986 }
78987 if (irqs_disabled()) {
78988- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
78989+ msg2 = " disabled interrupts";
78990 local_irq_enable();
78991 }
78992- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
78993+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
78994
78995 return ret;
78996 }
78997@@ -748,8 +815,14 @@ static void __init do_initcall_level(int level)
78998 level, level,
78999 &repair_env_string);
79000
79001- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
79002+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
79003 do_one_initcall(*fn);
79004+
79005+#ifdef LATENT_ENTROPY_PLUGIN
79006+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
79007+#endif
79008+
79009+ }
79010 }
79011
79012 static void __init do_initcalls(void)
79013@@ -783,8 +856,14 @@ static void __init do_pre_smp_initcalls(void)
79014 {
79015 initcall_t *fn;
79016
79017- for (fn = __initcall_start; fn < __initcall0_start; fn++)
79018+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
79019 do_one_initcall(*fn);
79020+
79021+#ifdef LATENT_ENTROPY_PLUGIN
79022+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
79023+#endif
79024+
79025+ }
79026 }
79027
79028 /*
79029@@ -802,8 +881,8 @@ static int run_init_process(const char *init_filename)
79030 {
79031 argv_init[0] = init_filename;
79032 return do_execve(init_filename,
79033- (const char __user *const __user *)argv_init,
79034- (const char __user *const __user *)envp_init);
79035+ (const char __user *const __force_user *)argv_init,
79036+ (const char __user *const __force_user *)envp_init);
79037 }
79038
79039 static noinline void __init kernel_init_freeable(void);
79040@@ -880,7 +959,7 @@ static noinline void __init kernel_init_freeable(void)
79041 do_basic_setup();
79042
79043 /* Open the /dev/console on the rootfs, this should never fail */
79044- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
79045+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
79046 pr_err("Warning: unable to open an initial console.\n");
79047
79048 (void) sys_dup(0);
79049@@ -893,11 +972,13 @@ static noinline void __init kernel_init_freeable(void)
79050 if (!ramdisk_execute_command)
79051 ramdisk_execute_command = "/init";
79052
79053- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
79054+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
79055 ramdisk_execute_command = NULL;
79056 prepare_namespace();
79057 }
79058
79059+ grsecurity_init();
79060+
79061 /*
79062 * Ok, we have completed the initial bootup, and
79063 * we're essentially up and running. Get rid of the
79064diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
79065index 130dfec..cc88451 100644
79066--- a/ipc/ipc_sysctl.c
79067+++ b/ipc/ipc_sysctl.c
79068@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
79069 static int proc_ipc_dointvec(ctl_table *table, int write,
79070 void __user *buffer, size_t *lenp, loff_t *ppos)
79071 {
79072- struct ctl_table ipc_table;
79073+ ctl_table_no_const ipc_table;
79074
79075 memcpy(&ipc_table, table, sizeof(ipc_table));
79076 ipc_table.data = get_ipc(table);
79077@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
79078 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
79079 void __user *buffer, size_t *lenp, loff_t *ppos)
79080 {
79081- struct ctl_table ipc_table;
79082+ ctl_table_no_const ipc_table;
79083
79084 memcpy(&ipc_table, table, sizeof(ipc_table));
79085 ipc_table.data = get_ipc(table);
79086@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
79087 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
79088 void __user *buffer, size_t *lenp, loff_t *ppos)
79089 {
79090- struct ctl_table ipc_table;
79091+ ctl_table_no_const ipc_table;
79092 size_t lenp_bef = *lenp;
79093 int rc;
79094
79095@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
79096 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
79097 void __user *buffer, size_t *lenp, loff_t *ppos)
79098 {
79099- struct ctl_table ipc_table;
79100+ ctl_table_no_const ipc_table;
79101 memcpy(&ipc_table, table, sizeof(ipc_table));
79102 ipc_table.data = get_ipc(table);
79103
79104@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
79105 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
79106 void __user *buffer, size_t *lenp, loff_t *ppos)
79107 {
79108- struct ctl_table ipc_table;
79109+ ctl_table_no_const ipc_table;
79110 size_t lenp_bef = *lenp;
79111 int oldval;
79112 int rc;
79113diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
79114index 383d638..943fdbb 100644
79115--- a/ipc/mq_sysctl.c
79116+++ b/ipc/mq_sysctl.c
79117@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
79118 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
79119 void __user *buffer, size_t *lenp, loff_t *ppos)
79120 {
79121- struct ctl_table mq_table;
79122+ ctl_table_no_const mq_table;
79123 memcpy(&mq_table, table, sizeof(mq_table));
79124 mq_table.data = get_mq(table);
79125
79126diff --git a/ipc/mqueue.c b/ipc/mqueue.c
79127index e4e47f6..a85e0ad 100644
79128--- a/ipc/mqueue.c
79129+++ b/ipc/mqueue.c
79130@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
79131 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
79132 info->attr.mq_msgsize);
79133
79134+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
79135 spin_lock(&mq_lock);
79136 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
79137 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
79138diff --git a/ipc/msg.c b/ipc/msg.c
79139index d0c6d96..69a893c 100644
79140--- a/ipc/msg.c
79141+++ b/ipc/msg.c
79142@@ -296,18 +296,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
79143 return security_msg_queue_associate(msq, msgflg);
79144 }
79145
79146+static struct ipc_ops msg_ops = {
79147+ .getnew = newque,
79148+ .associate = msg_security,
79149+ .more_checks = NULL
79150+};
79151+
79152 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
79153 {
79154 struct ipc_namespace *ns;
79155- struct ipc_ops msg_ops;
79156 struct ipc_params msg_params;
79157
79158 ns = current->nsproxy->ipc_ns;
79159
79160- msg_ops.getnew = newque;
79161- msg_ops.associate = msg_security;
79162- msg_ops.more_checks = NULL;
79163-
79164 msg_params.key = key;
79165 msg_params.flg = msgflg;
79166
79167diff --git a/ipc/sem.c b/ipc/sem.c
79168index 70480a3..f4e8262 100644
79169--- a/ipc/sem.c
79170+++ b/ipc/sem.c
79171@@ -460,10 +460,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
79172 return 0;
79173 }
79174
79175+static struct ipc_ops sem_ops = {
79176+ .getnew = newary,
79177+ .associate = sem_security,
79178+ .more_checks = sem_more_checks
79179+};
79180+
79181 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
79182 {
79183 struct ipc_namespace *ns;
79184- struct ipc_ops sem_ops;
79185 struct ipc_params sem_params;
79186
79187 ns = current->nsproxy->ipc_ns;
79188@@ -471,10 +476,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
79189 if (nsems < 0 || nsems > ns->sc_semmsl)
79190 return -EINVAL;
79191
79192- sem_ops.getnew = newary;
79193- sem_ops.associate = sem_security;
79194- sem_ops.more_checks = sem_more_checks;
79195-
79196 sem_params.key = key;
79197 sem_params.flg = semflg;
79198 sem_params.u.nsems = nsems;
79199diff --git a/ipc/shm.c b/ipc/shm.c
79200index 7e199fa..180a1ca 100644
79201--- a/ipc/shm.c
79202+++ b/ipc/shm.c
79203@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
79204 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
79205 #endif
79206
79207+#ifdef CONFIG_GRKERNSEC
79208+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
79209+ const time_t shm_createtime, const kuid_t cuid,
79210+ const int shmid);
79211+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
79212+ const time_t shm_createtime);
79213+#endif
79214+
79215 void shm_init_ns(struct ipc_namespace *ns)
79216 {
79217 ns->shm_ctlmax = SHMMAX;
79218@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
79219 shp->shm_lprid = 0;
79220 shp->shm_atim = shp->shm_dtim = 0;
79221 shp->shm_ctim = get_seconds();
79222+#ifdef CONFIG_GRKERNSEC
79223+ {
79224+ struct timespec timeval;
79225+ do_posix_clock_monotonic_gettime(&timeval);
79226+
79227+ shp->shm_createtime = timeval.tv_sec;
79228+ }
79229+#endif
79230 shp->shm_segsz = size;
79231 shp->shm_nattch = 0;
79232 shp->shm_file = file;
79233@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
79234 return 0;
79235 }
79236
79237+static struct ipc_ops shm_ops = {
79238+ .getnew = newseg,
79239+ .associate = shm_security,
79240+ .more_checks = shm_more_checks
79241+};
79242+
79243 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
79244 {
79245 struct ipc_namespace *ns;
79246- struct ipc_ops shm_ops;
79247 struct ipc_params shm_params;
79248
79249 ns = current->nsproxy->ipc_ns;
79250
79251- shm_ops.getnew = newseg;
79252- shm_ops.associate = shm_security;
79253- shm_ops.more_checks = shm_more_checks;
79254-
79255 shm_params.key = key;
79256 shm_params.flg = shmflg;
79257 shm_params.u.size = size;
79258@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
79259 f_mode = FMODE_READ | FMODE_WRITE;
79260 }
79261 if (shmflg & SHM_EXEC) {
79262+
79263+#ifdef CONFIG_PAX_MPROTECT
79264+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
79265+ goto out;
79266+#endif
79267+
79268 prot |= PROT_EXEC;
79269 acc_mode |= S_IXUGO;
79270 }
79271@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
79272 if (err)
79273 goto out_unlock;
79274
79275+#ifdef CONFIG_GRKERNSEC
79276+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
79277+ shp->shm_perm.cuid, shmid) ||
79278+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
79279+ err = -EACCES;
79280+ goto out_unlock;
79281+ }
79282+#endif
79283+
79284 path = shp->shm_file->f_path;
79285 path_get(&path);
79286 shp->shm_nattch++;
79287+#ifdef CONFIG_GRKERNSEC
79288+ shp->shm_lapid = current->pid;
79289+#endif
79290 size = i_size_read(path.dentry->d_inode);
79291 shm_unlock(shp);
79292
79293diff --git a/kernel/acct.c b/kernel/acct.c
79294index 8d6e145..33e0b1e 100644
79295--- a/kernel/acct.c
79296+++ b/kernel/acct.c
79297@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
79298 */
79299 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
79300 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
79301- file->f_op->write(file, (char *)&ac,
79302+ file->f_op->write(file, (char __force_user *)&ac,
79303 sizeof(acct_t), &file->f_pos);
79304 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
79305 set_fs(fs);
79306diff --git a/kernel/audit.c b/kernel/audit.c
79307index 91e53d0..d9e3ec4 100644
79308--- a/kernel/audit.c
79309+++ b/kernel/audit.c
79310@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
79311 3) suppressed due to audit_rate_limit
79312 4) suppressed due to audit_backlog_limit
79313 */
79314-static atomic_t audit_lost = ATOMIC_INIT(0);
79315+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
79316
79317 /* The netlink socket. */
79318 static struct sock *audit_sock;
79319@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
79320 unsigned long now;
79321 int print;
79322
79323- atomic_inc(&audit_lost);
79324+ atomic_inc_unchecked(&audit_lost);
79325
79326 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
79327
79328@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
79329 printk(KERN_WARNING
79330 "audit: audit_lost=%d audit_rate_limit=%d "
79331 "audit_backlog_limit=%d\n",
79332- atomic_read(&audit_lost),
79333+ atomic_read_unchecked(&audit_lost),
79334 audit_rate_limit,
79335 audit_backlog_limit);
79336 audit_panic(message);
79337@@ -664,7 +664,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
79338 status_set.pid = audit_pid;
79339 status_set.rate_limit = audit_rate_limit;
79340 status_set.backlog_limit = audit_backlog_limit;
79341- status_set.lost = atomic_read(&audit_lost);
79342+ status_set.lost = atomic_read_unchecked(&audit_lost);
79343 status_set.backlog = skb_queue_len(&audit_skb_queue);
79344 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
79345 &status_set, sizeof(status_set));
79346diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
79347index 6bd4a90..0ee9eff 100644
79348--- a/kernel/auditfilter.c
79349+++ b/kernel/auditfilter.c
79350@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
79351 f->lsm_rule = NULL;
79352
79353 /* Support legacy tests for a valid loginuid */
79354- if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
79355+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
79356 f->type = AUDIT_LOGINUID_SET;
79357 f->val = 0;
79358 }
79359diff --git a/kernel/auditsc.c b/kernel/auditsc.c
79360index 3c8a601..3a416f6 100644
79361--- a/kernel/auditsc.c
79362+++ b/kernel/auditsc.c
79363@@ -1956,7 +1956,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
79364 }
79365
79366 /* global counter which is incremented every time something logs in */
79367-static atomic_t session_id = ATOMIC_INIT(0);
79368+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
79369
79370 /**
79371 * audit_set_loginuid - set current task's audit_context loginuid
79372@@ -1980,7 +1980,7 @@ int audit_set_loginuid(kuid_t loginuid)
79373 return -EPERM;
79374 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
79375
79376- sessionid = atomic_inc_return(&session_id);
79377+ sessionid = atomic_inc_return_unchecked(&session_id);
79378 if (context && context->in_syscall) {
79379 struct audit_buffer *ab;
79380
79381diff --git a/kernel/capability.c b/kernel/capability.c
79382index f6c2ce5..982c0f9 100644
79383--- a/kernel/capability.c
79384+++ b/kernel/capability.c
79385@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
79386 * before modification is attempted and the application
79387 * fails.
79388 */
79389+ if (tocopy > ARRAY_SIZE(kdata))
79390+ return -EFAULT;
79391+
79392 if (copy_to_user(dataptr, kdata, tocopy
79393 * sizeof(struct __user_cap_data_struct))) {
79394 return -EFAULT;
79395@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
79396 int ret;
79397
79398 rcu_read_lock();
79399- ret = security_capable(__task_cred(t), ns, cap);
79400+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
79401+ gr_task_is_capable(t, __task_cred(t), cap);
79402 rcu_read_unlock();
79403
79404- return (ret == 0);
79405+ return ret;
79406 }
79407
79408 /**
79409@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
79410 int ret;
79411
79412 rcu_read_lock();
79413- ret = security_capable_noaudit(__task_cred(t), ns, cap);
79414+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
79415 rcu_read_unlock();
79416
79417- return (ret == 0);
79418+ return ret;
79419 }
79420
79421 /**
79422@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
79423 BUG();
79424 }
79425
79426- if (security_capable(current_cred(), ns, cap) == 0) {
79427+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
79428 current->flags |= PF_SUPERPRIV;
79429 return true;
79430 }
79431@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
79432 }
79433 EXPORT_SYMBOL(ns_capable);
79434
79435+bool ns_capable_nolog(struct user_namespace *ns, int cap)
79436+{
79437+ if (unlikely(!cap_valid(cap))) {
79438+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
79439+ BUG();
79440+ }
79441+
79442+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
79443+ current->flags |= PF_SUPERPRIV;
79444+ return true;
79445+ }
79446+ return false;
79447+}
79448+EXPORT_SYMBOL(ns_capable_nolog);
79449+
79450 /**
79451 * file_ns_capable - Determine if the file's opener had a capability in effect
79452 * @file: The file we want to check
79453@@ -432,6 +451,12 @@ bool capable(int cap)
79454 }
79455 EXPORT_SYMBOL(capable);
79456
79457+bool capable_nolog(int cap)
79458+{
79459+ return ns_capable_nolog(&init_user_ns, cap);
79460+}
79461+EXPORT_SYMBOL(capable_nolog);
79462+
79463 /**
79464 * nsown_capable - Check superior capability to one's own user_ns
79465 * @cap: The capability in question
79466@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
79467
79468 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
79469 }
79470+
79471+bool inode_capable_nolog(const struct inode *inode, int cap)
79472+{
79473+ struct user_namespace *ns = current_user_ns();
79474+
79475+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
79476+}
79477diff --git a/kernel/cgroup.c b/kernel/cgroup.c
79478index 2e9b387..61817b1 100644
79479--- a/kernel/cgroup.c
79480+++ b/kernel/cgroup.c
79481@@ -5398,7 +5398,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
79482 struct css_set *cg = link->cg;
79483 struct task_struct *task;
79484 int count = 0;
79485- seq_printf(seq, "css_set %p\n", cg);
79486+ seq_printf(seq, "css_set %pK\n", cg);
79487 list_for_each_entry(task, &cg->tasks, cg_list) {
79488 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
79489 seq_puts(seq, " ...\n");
79490diff --git a/kernel/compat.c b/kernel/compat.c
79491index 0a09e48..f44f3f0 100644
79492--- a/kernel/compat.c
79493+++ b/kernel/compat.c
79494@@ -13,6 +13,7 @@
79495
79496 #include <linux/linkage.h>
79497 #include <linux/compat.h>
79498+#include <linux/module.h>
79499 #include <linux/errno.h>
79500 #include <linux/time.h>
79501 #include <linux/signal.h>
79502@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
79503 mm_segment_t oldfs;
79504 long ret;
79505
79506- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
79507+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
79508 oldfs = get_fs();
79509 set_fs(KERNEL_DS);
79510 ret = hrtimer_nanosleep_restart(restart);
79511@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
79512 oldfs = get_fs();
79513 set_fs(KERNEL_DS);
79514 ret = hrtimer_nanosleep(&tu,
79515- rmtp ? (struct timespec __user *)&rmt : NULL,
79516+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
79517 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
79518 set_fs(oldfs);
79519
79520@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
79521 mm_segment_t old_fs = get_fs();
79522
79523 set_fs(KERNEL_DS);
79524- ret = sys_sigpending((old_sigset_t __user *) &s);
79525+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
79526 set_fs(old_fs);
79527 if (ret == 0)
79528 ret = put_user(s, set);
79529@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
79530 mm_segment_t old_fs = get_fs();
79531
79532 set_fs(KERNEL_DS);
79533- ret = sys_old_getrlimit(resource, &r);
79534+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
79535 set_fs(old_fs);
79536
79537 if (!ret) {
79538@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
79539 set_fs (KERNEL_DS);
79540 ret = sys_wait4(pid,
79541 (stat_addr ?
79542- (unsigned int __user *) &status : NULL),
79543- options, (struct rusage __user *) &r);
79544+ (unsigned int __force_user *) &status : NULL),
79545+ options, (struct rusage __force_user *) &r);
79546 set_fs (old_fs);
79547
79548 if (ret > 0) {
79549@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
79550 memset(&info, 0, sizeof(info));
79551
79552 set_fs(KERNEL_DS);
79553- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
79554- uru ? (struct rusage __user *)&ru : NULL);
79555+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
79556+ uru ? (struct rusage __force_user *)&ru : NULL);
79557 set_fs(old_fs);
79558
79559 if ((ret < 0) || (info.si_signo == 0))
79560@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
79561 oldfs = get_fs();
79562 set_fs(KERNEL_DS);
79563 err = sys_timer_settime(timer_id, flags,
79564- (struct itimerspec __user *) &newts,
79565- (struct itimerspec __user *) &oldts);
79566+ (struct itimerspec __force_user *) &newts,
79567+ (struct itimerspec __force_user *) &oldts);
79568 set_fs(oldfs);
79569 if (!err && old && put_compat_itimerspec(old, &oldts))
79570 return -EFAULT;
79571@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
79572 oldfs = get_fs();
79573 set_fs(KERNEL_DS);
79574 err = sys_timer_gettime(timer_id,
79575- (struct itimerspec __user *) &ts);
79576+ (struct itimerspec __force_user *) &ts);
79577 set_fs(oldfs);
79578 if (!err && put_compat_itimerspec(setting, &ts))
79579 return -EFAULT;
79580@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
79581 oldfs = get_fs();
79582 set_fs(KERNEL_DS);
79583 err = sys_clock_settime(which_clock,
79584- (struct timespec __user *) &ts);
79585+ (struct timespec __force_user *) &ts);
79586 set_fs(oldfs);
79587 return err;
79588 }
79589@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
79590 oldfs = get_fs();
79591 set_fs(KERNEL_DS);
79592 err = sys_clock_gettime(which_clock,
79593- (struct timespec __user *) &ts);
79594+ (struct timespec __force_user *) &ts);
79595 set_fs(oldfs);
79596 if (!err && put_compat_timespec(&ts, tp))
79597 return -EFAULT;
79598@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
79599
79600 oldfs = get_fs();
79601 set_fs(KERNEL_DS);
79602- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
79603+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
79604 set_fs(oldfs);
79605
79606 err = compat_put_timex(utp, &txc);
79607@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
79608 oldfs = get_fs();
79609 set_fs(KERNEL_DS);
79610 err = sys_clock_getres(which_clock,
79611- (struct timespec __user *) &ts);
79612+ (struct timespec __force_user *) &ts);
79613 set_fs(oldfs);
79614 if (!err && tp && put_compat_timespec(&ts, tp))
79615 return -EFAULT;
79616@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
79617 long err;
79618 mm_segment_t oldfs;
79619 struct timespec tu;
79620- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
79621+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
79622
79623- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
79624+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
79625 oldfs = get_fs();
79626 set_fs(KERNEL_DS);
79627 err = clock_nanosleep_restart(restart);
79628@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
79629 oldfs = get_fs();
79630 set_fs(KERNEL_DS);
79631 err = sys_clock_nanosleep(which_clock, flags,
79632- (struct timespec __user *) &in,
79633- (struct timespec __user *) &out);
79634+ (struct timespec __force_user *) &in,
79635+ (struct timespec __force_user *) &out);
79636 set_fs(oldfs);
79637
79638 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
79639diff --git a/kernel/configs.c b/kernel/configs.c
79640index c18b1f1..b9a0132 100644
79641--- a/kernel/configs.c
79642+++ b/kernel/configs.c
79643@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
79644 struct proc_dir_entry *entry;
79645
79646 /* create the current config file */
79647+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
79648+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
79649+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
79650+ &ikconfig_file_ops);
79651+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79652+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
79653+ &ikconfig_file_ops);
79654+#endif
79655+#else
79656 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
79657 &ikconfig_file_ops);
79658+#endif
79659+
79660 if (!entry)
79661 return -ENOMEM;
79662
79663diff --git a/kernel/cred.c b/kernel/cred.c
79664index e0573a4..3874e41 100644
79665--- a/kernel/cred.c
79666+++ b/kernel/cred.c
79667@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
79668 validate_creds(cred);
79669 alter_cred_subscribers(cred, -1);
79670 put_cred(cred);
79671+
79672+#ifdef CONFIG_GRKERNSEC_SETXID
79673+ cred = (struct cred *) tsk->delayed_cred;
79674+ if (cred != NULL) {
79675+ tsk->delayed_cred = NULL;
79676+ validate_creds(cred);
79677+ alter_cred_subscribers(cred, -1);
79678+ put_cred(cred);
79679+ }
79680+#endif
79681 }
79682
79683 /**
79684@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
79685 * Always returns 0 thus allowing this function to be tail-called at the end
79686 * of, say, sys_setgid().
79687 */
79688-int commit_creds(struct cred *new)
79689+static int __commit_creds(struct cred *new)
79690 {
79691 struct task_struct *task = current;
79692 const struct cred *old = task->real_cred;
79693@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
79694
79695 get_cred(new); /* we will require a ref for the subj creds too */
79696
79697+ gr_set_role_label(task, new->uid, new->gid);
79698+
79699 /* dumpability changes */
79700 if (!uid_eq(old->euid, new->euid) ||
79701 !gid_eq(old->egid, new->egid) ||
79702@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
79703 put_cred(old);
79704 return 0;
79705 }
79706+#ifdef CONFIG_GRKERNSEC_SETXID
79707+extern int set_user(struct cred *new);
79708+
79709+void gr_delayed_cred_worker(void)
79710+{
79711+ const struct cred *new = current->delayed_cred;
79712+ struct cred *ncred;
79713+
79714+ current->delayed_cred = NULL;
79715+
79716+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
79717+ // from doing get_cred on it when queueing this
79718+ put_cred(new);
79719+ return;
79720+ } else if (new == NULL)
79721+ return;
79722+
79723+ ncred = prepare_creds();
79724+ if (!ncred)
79725+ goto die;
79726+ // uids
79727+ ncred->uid = new->uid;
79728+ ncred->euid = new->euid;
79729+ ncred->suid = new->suid;
79730+ ncred->fsuid = new->fsuid;
79731+ // gids
79732+ ncred->gid = new->gid;
79733+ ncred->egid = new->egid;
79734+ ncred->sgid = new->sgid;
79735+ ncred->fsgid = new->fsgid;
79736+ // groups
79737+ if (set_groups(ncred, new->group_info) < 0) {
79738+ abort_creds(ncred);
79739+ goto die;
79740+ }
79741+ // caps
79742+ ncred->securebits = new->securebits;
79743+ ncred->cap_inheritable = new->cap_inheritable;
79744+ ncred->cap_permitted = new->cap_permitted;
79745+ ncred->cap_effective = new->cap_effective;
79746+ ncred->cap_bset = new->cap_bset;
79747+
79748+ if (set_user(ncred)) {
79749+ abort_creds(ncred);
79750+ goto die;
79751+ }
79752+
79753+ // from doing get_cred on it when queueing this
79754+ put_cred(new);
79755+
79756+ __commit_creds(ncred);
79757+ return;
79758+die:
79759+ // from doing get_cred on it when queueing this
79760+ put_cred(new);
79761+ do_group_exit(SIGKILL);
79762+}
79763+#endif
79764+
79765+int commit_creds(struct cred *new)
79766+{
79767+#ifdef CONFIG_GRKERNSEC_SETXID
79768+ int ret;
79769+ int schedule_it = 0;
79770+ struct task_struct *t;
79771+
79772+ /* we won't get called with tasklist_lock held for writing
79773+ and interrupts disabled as the cred struct in that case is
79774+ init_cred
79775+ */
79776+ if (grsec_enable_setxid && !current_is_single_threaded() &&
79777+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
79778+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
79779+ schedule_it = 1;
79780+ }
79781+ ret = __commit_creds(new);
79782+ if (schedule_it) {
79783+ rcu_read_lock();
79784+ read_lock(&tasklist_lock);
79785+ for (t = next_thread(current); t != current;
79786+ t = next_thread(t)) {
79787+ if (t->delayed_cred == NULL) {
79788+ t->delayed_cred = get_cred(new);
79789+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
79790+ set_tsk_need_resched(t);
79791+ }
79792+ }
79793+ read_unlock(&tasklist_lock);
79794+ rcu_read_unlock();
79795+ }
79796+ return ret;
79797+#else
79798+ return __commit_creds(new);
79799+#endif
79800+}
79801+
79802 EXPORT_SYMBOL(commit_creds);
79803
79804 /**
79805diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
79806index 0506d44..2c20034 100644
79807--- a/kernel/debug/debug_core.c
79808+++ b/kernel/debug/debug_core.c
79809@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
79810 */
79811 static atomic_t masters_in_kgdb;
79812 static atomic_t slaves_in_kgdb;
79813-static atomic_t kgdb_break_tasklet_var;
79814+static atomic_unchecked_t kgdb_break_tasklet_var;
79815 atomic_t kgdb_setting_breakpoint;
79816
79817 struct task_struct *kgdb_usethread;
79818@@ -133,7 +133,7 @@ int kgdb_single_step;
79819 static pid_t kgdb_sstep_pid;
79820
79821 /* to keep track of the CPU which is doing the single stepping*/
79822-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
79823+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
79824
79825 /*
79826 * If you are debugging a problem where roundup (the collection of
79827@@ -541,7 +541,7 @@ return_normal:
79828 * kernel will only try for the value of sstep_tries before
79829 * giving up and continuing on.
79830 */
79831- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
79832+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
79833 (kgdb_info[cpu].task &&
79834 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
79835 atomic_set(&kgdb_active, -1);
79836@@ -635,8 +635,8 @@ cpu_master_loop:
79837 }
79838
79839 kgdb_restore:
79840- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
79841- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
79842+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
79843+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
79844 if (kgdb_info[sstep_cpu].task)
79845 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
79846 else
79847@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
79848 static void kgdb_tasklet_bpt(unsigned long ing)
79849 {
79850 kgdb_breakpoint();
79851- atomic_set(&kgdb_break_tasklet_var, 0);
79852+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
79853 }
79854
79855 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
79856
79857 void kgdb_schedule_breakpoint(void)
79858 {
79859- if (atomic_read(&kgdb_break_tasklet_var) ||
79860+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
79861 atomic_read(&kgdb_active) != -1 ||
79862 atomic_read(&kgdb_setting_breakpoint))
79863 return;
79864- atomic_inc(&kgdb_break_tasklet_var);
79865+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
79866 tasklet_schedule(&kgdb_tasklet_breakpoint);
79867 }
79868 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
79869diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
79870index 00eb8f7..d7e3244 100644
79871--- a/kernel/debug/kdb/kdb_main.c
79872+++ b/kernel/debug/kdb/kdb_main.c
79873@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
79874 continue;
79875
79876 kdb_printf("%-20s%8u 0x%p ", mod->name,
79877- mod->core_size, (void *)mod);
79878+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
79879 #ifdef CONFIG_MODULE_UNLOAD
79880 kdb_printf("%4ld ", module_refcount(mod));
79881 #endif
79882@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
79883 kdb_printf(" (Loading)");
79884 else
79885 kdb_printf(" (Live)");
79886- kdb_printf(" 0x%p", mod->module_core);
79887+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
79888
79889 #ifdef CONFIG_MODULE_UNLOAD
79890 {
79891diff --git a/kernel/events/core.c b/kernel/events/core.c
79892index e76e495..cbfe63a 100644
79893--- a/kernel/events/core.c
79894+++ b/kernel/events/core.c
79895@@ -156,8 +156,15 @@ static struct srcu_struct pmus_srcu;
79896 * 0 - disallow raw tracepoint access for unpriv
79897 * 1 - disallow cpu events for unpriv
79898 * 2 - disallow kernel profiling for unpriv
79899+ * 3 - disallow all unpriv perf event use
79900 */
79901-int sysctl_perf_event_paranoid __read_mostly = 1;
79902+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
79903+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
79904+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
79905+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
79906+#else
79907+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
79908+#endif
79909
79910 /* Minimum for 512 kiB + 1 user control page */
79911 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
79912@@ -184,7 +191,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
79913 return 0;
79914 }
79915
79916-static atomic64_t perf_event_id;
79917+static atomic64_unchecked_t perf_event_id;
79918
79919 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
79920 enum event_type_t event_type);
79921@@ -2747,7 +2754,7 @@ static void __perf_event_read(void *info)
79922
79923 static inline u64 perf_event_count(struct perf_event *event)
79924 {
79925- return local64_read(&event->count) + atomic64_read(&event->child_count);
79926+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
79927 }
79928
79929 static u64 perf_event_read(struct perf_event *event)
79930@@ -3093,9 +3100,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
79931 mutex_lock(&event->child_mutex);
79932 total += perf_event_read(event);
79933 *enabled += event->total_time_enabled +
79934- atomic64_read(&event->child_total_time_enabled);
79935+ atomic64_read_unchecked(&event->child_total_time_enabled);
79936 *running += event->total_time_running +
79937- atomic64_read(&event->child_total_time_running);
79938+ atomic64_read_unchecked(&event->child_total_time_running);
79939
79940 list_for_each_entry(child, &event->child_list, child_list) {
79941 total += perf_event_read(child);
79942@@ -3481,10 +3488,10 @@ void perf_event_update_userpage(struct perf_event *event)
79943 userpg->offset -= local64_read(&event->hw.prev_count);
79944
79945 userpg->time_enabled = enabled +
79946- atomic64_read(&event->child_total_time_enabled);
79947+ atomic64_read_unchecked(&event->child_total_time_enabled);
79948
79949 userpg->time_running = running +
79950- atomic64_read(&event->child_total_time_running);
79951+ atomic64_read_unchecked(&event->child_total_time_running);
79952
79953 arch_perf_update_userpage(userpg, now);
79954
79955@@ -4034,7 +4041,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
79956
79957 /* Data. */
79958 sp = perf_user_stack_pointer(regs);
79959- rem = __output_copy_user(handle, (void *) sp, dump_size);
79960+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
79961 dyn_size = dump_size - rem;
79962
79963 perf_output_skip(handle, rem);
79964@@ -4122,11 +4129,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
79965 values[n++] = perf_event_count(event);
79966 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
79967 values[n++] = enabled +
79968- atomic64_read(&event->child_total_time_enabled);
79969+ atomic64_read_unchecked(&event->child_total_time_enabled);
79970 }
79971 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
79972 values[n++] = running +
79973- atomic64_read(&event->child_total_time_running);
79974+ atomic64_read_unchecked(&event->child_total_time_running);
79975 }
79976 if (read_format & PERF_FORMAT_ID)
79977 values[n++] = primary_event_id(event);
79978@@ -4835,12 +4842,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
79979 * need to add enough zero bytes after the string to handle
79980 * the 64bit alignment we do later.
79981 */
79982- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
79983+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
79984 if (!buf) {
79985 name = strncpy(tmp, "//enomem", sizeof(tmp));
79986 goto got_name;
79987 }
79988- name = d_path(&file->f_path, buf, PATH_MAX);
79989+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
79990 if (IS_ERR(name)) {
79991 name = strncpy(tmp, "//toolong", sizeof(tmp));
79992 goto got_name;
79993@@ -6262,7 +6269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
79994 event->parent = parent_event;
79995
79996 event->ns = get_pid_ns(task_active_pid_ns(current));
79997- event->id = atomic64_inc_return(&perf_event_id);
79998+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
79999
80000 event->state = PERF_EVENT_STATE_INACTIVE;
80001
80002@@ -6572,6 +6579,11 @@ SYSCALL_DEFINE5(perf_event_open,
80003 if (flags & ~PERF_FLAG_ALL)
80004 return -EINVAL;
80005
80006+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
80007+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
80008+ return -EACCES;
80009+#endif
80010+
80011 err = perf_copy_attr(attr_uptr, &attr);
80012 if (err)
80013 return err;
80014@@ -6904,10 +6916,10 @@ static void sync_child_event(struct perf_event *child_event,
80015 /*
80016 * Add back the child's count to the parent's count:
80017 */
80018- atomic64_add(child_val, &parent_event->child_count);
80019- atomic64_add(child_event->total_time_enabled,
80020+ atomic64_add_unchecked(child_val, &parent_event->child_count);
80021+ atomic64_add_unchecked(child_event->total_time_enabled,
80022 &parent_event->child_total_time_enabled);
80023- atomic64_add(child_event->total_time_running,
80024+ atomic64_add_unchecked(child_event->total_time_running,
80025 &parent_event->child_total_time_running);
80026
80027 /*
80028diff --git a/kernel/events/internal.h b/kernel/events/internal.h
e2b79cd1 80029index ca65997..60df03d 100644
bb5f0bf8
AF
80030--- a/kernel/events/internal.h
80031+++ b/kernel/events/internal.h
80032@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
80033 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
80034 }
80035
80036-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
e2b79cd1 80037-static inline unsigned int \
bb5f0bf8 80038+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
e2b79cd1 80039+static inline unsigned long \
bb5f0bf8
AF
80040 func_name(struct perf_output_handle *handle, \
80041- const void *buf, unsigned int len) \
e2b79cd1 80042+ const void user *buf, unsigned long len) \
bb5f0bf8
AF
80043 { \
80044 unsigned long size, written; \
80045 \
80046@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
80047 return n;
80048 }
80049
80050-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
80051+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
80052
80053 #define MEMCPY_SKIP(dst, src, n) (n)
80054
80055-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
80056+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
80057
80058 #ifndef arch_perf_out_copy_user
80059 #define arch_perf_out_copy_user __copy_from_user_inatomic
80060 #endif
80061
80062-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
80063+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
80064
80065 /* Callchain handling */
80066 extern struct perf_callchain_entry *
e2b79cd1
AF
80067diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
80068index f356974..cb8c570 100644
80069--- a/kernel/events/uprobes.c
80070+++ b/kernel/events/uprobes.c
80071@@ -1556,7 +1556,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
80072 {
80073 struct page *page;
80074 uprobe_opcode_t opcode;
80075- int result;
80076+ long result;
80077
80078 pagefault_disable();
80079 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
bb5f0bf8
AF
80080diff --git a/kernel/exit.c b/kernel/exit.c
80081index 7bb73f9..d7978ed 100644
80082--- a/kernel/exit.c
80083+++ b/kernel/exit.c
80084@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
80085 struct task_struct *leader;
80086 int zap_leader;
80087 repeat:
80088+#ifdef CONFIG_NET
80089+ gr_del_task_from_ip_table(p);
80090+#endif
80091+
80092 /* don't need to get the RCU readlock here - the process is dead and
80093 * can't be modifying its own credentials. But shut RCU-lockdep up */
80094 rcu_read_lock();
80095@@ -340,7 +344,7 @@ int allow_signal(int sig)
80096 * know it'll be handled, so that they don't get converted to
80097 * SIGKILL or just silently dropped.
80098 */
80099- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
80100+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
80101 recalc_sigpending();
80102 spin_unlock_irq(&current->sighand->siglock);
80103 return 0;
80104@@ -709,6 +713,8 @@ void do_exit(long code)
80105 struct task_struct *tsk = current;
80106 int group_dead;
80107
80108+ set_fs(USER_DS);
80109+
80110 profile_task_exit(tsk);
80111
80112 WARN_ON(blk_needs_flush_plug(tsk));
80113@@ -725,7 +731,6 @@ void do_exit(long code)
80114 * mm_release()->clear_child_tid() from writing to a user-controlled
80115 * kernel address.
80116 */
80117- set_fs(USER_DS);
80118
80119 ptrace_event(PTRACE_EVENT_EXIT, code);
80120
80121@@ -784,6 +789,9 @@ void do_exit(long code)
80122 tsk->exit_code = code;
80123 taskstats_exit(tsk, group_dead);
80124
80125+ gr_acl_handle_psacct(tsk, code);
80126+ gr_acl_handle_exit();
80127+
80128 exit_mm(tsk);
80129
80130 if (group_dead)
80131@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
80132 * Take down every thread in the group. This is called by fatal signals
80133 * as well as by sys_exit_group (below).
80134 */
80135-void
80136+__noreturn void
80137 do_group_exit(int exit_code)
80138 {
80139 struct signal_struct *sig = current->signal;
80140diff --git a/kernel/fork.c b/kernel/fork.c
80141index ffbc090..08ceeee 100644
80142--- a/kernel/fork.c
80143+++ b/kernel/fork.c
80144@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
80145 *stackend = STACK_END_MAGIC; /* for overflow detection */
80146
80147 #ifdef CONFIG_CC_STACKPROTECTOR
80148- tsk->stack_canary = get_random_int();
80149+ tsk->stack_canary = pax_get_random_long();
80150 #endif
80151
80152 /*
80153@@ -345,13 +345,81 @@ free_tsk:
80154 }
80155
80156 #ifdef CONFIG_MMU
80157+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
80158+{
80159+ struct vm_area_struct *tmp;
80160+ unsigned long charge;
80161+ struct mempolicy *pol;
80162+ struct file *file;
80163+
80164+ charge = 0;
80165+ if (mpnt->vm_flags & VM_ACCOUNT) {
80166+ unsigned long len = vma_pages(mpnt);
80167+
80168+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
80169+ goto fail_nomem;
80170+ charge = len;
80171+ }
80172+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80173+ if (!tmp)
80174+ goto fail_nomem;
80175+ *tmp = *mpnt;
80176+ tmp->vm_mm = mm;
80177+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
80178+ pol = mpol_dup(vma_policy(mpnt));
80179+ if (IS_ERR(pol))
80180+ goto fail_nomem_policy;
80181+ vma_set_policy(tmp, pol);
80182+ if (anon_vma_fork(tmp, mpnt))
80183+ goto fail_nomem_anon_vma_fork;
80184+ tmp->vm_flags &= ~VM_LOCKED;
80185+ tmp->vm_next = tmp->vm_prev = NULL;
80186+ tmp->vm_mirror = NULL;
80187+ file = tmp->vm_file;
80188+ if (file) {
80189+ struct inode *inode = file_inode(file);
80190+ struct address_space *mapping = file->f_mapping;
80191+
80192+ get_file(file);
80193+ if (tmp->vm_flags & VM_DENYWRITE)
80194+ atomic_dec(&inode->i_writecount);
80195+ mutex_lock(&mapping->i_mmap_mutex);
80196+ if (tmp->vm_flags & VM_SHARED)
80197+ mapping->i_mmap_writable++;
80198+ flush_dcache_mmap_lock(mapping);
80199+ /* insert tmp into the share list, just after mpnt */
80200+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
80201+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
80202+ else
80203+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
80204+ flush_dcache_mmap_unlock(mapping);
80205+ mutex_unlock(&mapping->i_mmap_mutex);
80206+ }
80207+
80208+ /*
80209+ * Clear hugetlb-related page reserves for children. This only
80210+ * affects MAP_PRIVATE mappings. Faults generated by the child
80211+ * are not guaranteed to succeed, even if read-only
80212+ */
80213+ if (is_vm_hugetlb_page(tmp))
80214+ reset_vma_resv_huge_pages(tmp);
80215+
80216+ return tmp;
80217+
80218+fail_nomem_anon_vma_fork:
80219+ mpol_put(pol);
80220+fail_nomem_policy:
80221+ kmem_cache_free(vm_area_cachep, tmp);
80222+fail_nomem:
80223+ vm_unacct_memory(charge);
80224+ return NULL;
80225+}
80226+
80227 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80228 {
80229 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
80230 struct rb_node **rb_link, *rb_parent;
80231 int retval;
80232- unsigned long charge;
80233- struct mempolicy *pol;
80234
80235 uprobe_start_dup_mmap();
80236 down_write(&oldmm->mmap_sem);
80237@@ -365,8 +433,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80238 mm->locked_vm = 0;
80239 mm->mmap = NULL;
80240 mm->mmap_cache = NULL;
80241- mm->free_area_cache = oldmm->mmap_base;
80242- mm->cached_hole_size = ~0UL;
80243+ mm->free_area_cache = oldmm->free_area_cache;
80244+ mm->cached_hole_size = oldmm->cached_hole_size;
80245 mm->map_count = 0;
80246 cpumask_clear(mm_cpumask(mm));
80247 mm->mm_rb = RB_ROOT;
80248@@ -382,57 +450,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80249
80250 prev = NULL;
80251 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
80252- struct file *file;
80253-
80254 if (mpnt->vm_flags & VM_DONTCOPY) {
80255 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
80256 -vma_pages(mpnt));
80257 continue;
80258 }
80259- charge = 0;
80260- if (mpnt->vm_flags & VM_ACCOUNT) {
80261- unsigned long len = vma_pages(mpnt);
80262-
80263- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
80264- goto fail_nomem;
80265- charge = len;
80266- }
80267- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80268- if (!tmp)
80269- goto fail_nomem;
80270- *tmp = *mpnt;
80271- INIT_LIST_HEAD(&tmp->anon_vma_chain);
80272- pol = mpol_dup(vma_policy(mpnt));
80273- retval = PTR_ERR(pol);
80274- if (IS_ERR(pol))
80275- goto fail_nomem_policy;
80276- vma_set_policy(tmp, pol);
80277- tmp->vm_mm = mm;
80278- if (anon_vma_fork(tmp, mpnt))
80279- goto fail_nomem_anon_vma_fork;
80280- tmp->vm_flags &= ~VM_LOCKED;
80281- tmp->vm_next = tmp->vm_prev = NULL;
80282- file = tmp->vm_file;
80283- if (file) {
80284- struct inode *inode = file_inode(file);
80285- struct address_space *mapping = file->f_mapping;
80286-
80287- get_file(file);
80288- if (tmp->vm_flags & VM_DENYWRITE)
80289- atomic_dec(&inode->i_writecount);
80290- mutex_lock(&mapping->i_mmap_mutex);
80291- if (tmp->vm_flags & VM_SHARED)
80292- mapping->i_mmap_writable++;
80293- flush_dcache_mmap_lock(mapping);
80294- /* insert tmp into the share list, just after mpnt */
80295- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
80296- vma_nonlinear_insert(tmp,
80297- &mapping->i_mmap_nonlinear);
80298- else
80299- vma_interval_tree_insert_after(tmp, mpnt,
80300- &mapping->i_mmap);
80301- flush_dcache_mmap_unlock(mapping);
80302- mutex_unlock(&mapping->i_mmap_mutex);
80303+ tmp = dup_vma(mm, oldmm, mpnt);
80304+ if (!tmp) {
80305+ retval = -ENOMEM;
80306+ goto out;
80307 }
80308
80309 /*
80310@@ -464,6 +490,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80311 if (retval)
80312 goto out;
80313 }
80314+
80315+#ifdef CONFIG_PAX_SEGMEXEC
80316+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
80317+ struct vm_area_struct *mpnt_m;
80318+
80319+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
80320+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
80321+
80322+ if (!mpnt->vm_mirror)
80323+ continue;
80324+
80325+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
80326+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
80327+ mpnt->vm_mirror = mpnt_m;
80328+ } else {
80329+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
80330+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
80331+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
80332+ mpnt->vm_mirror->vm_mirror = mpnt;
80333+ }
80334+ }
80335+ BUG_ON(mpnt_m);
80336+ }
80337+#endif
80338+
80339 /* a new mm has just been created */
80340 arch_dup_mmap(oldmm, mm);
80341 retval = 0;
80342@@ -473,14 +524,6 @@ out:
80343 up_write(&oldmm->mmap_sem);
80344 uprobe_end_dup_mmap();
80345 return retval;
80346-fail_nomem_anon_vma_fork:
80347- mpol_put(pol);
80348-fail_nomem_policy:
80349- kmem_cache_free(vm_area_cachep, tmp);
80350-fail_nomem:
80351- retval = -ENOMEM;
80352- vm_unacct_memory(charge);
80353- goto out;
80354 }
80355
80356 static inline int mm_alloc_pgd(struct mm_struct *mm)
80357@@ -695,8 +738,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
80358 return ERR_PTR(err);
80359
80360 mm = get_task_mm(task);
80361- if (mm && mm != current->mm &&
80362- !ptrace_may_access(task, mode)) {
80363+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
80364+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
80365 mmput(mm);
80366 mm = ERR_PTR(-EACCES);
80367 }
80368@@ -918,13 +961,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
80369 spin_unlock(&fs->lock);
80370 return -EAGAIN;
80371 }
80372- fs->users++;
80373+ atomic_inc(&fs->users);
80374 spin_unlock(&fs->lock);
80375 return 0;
80376 }
80377 tsk->fs = copy_fs_struct(fs);
80378 if (!tsk->fs)
80379 return -ENOMEM;
80380+ /* Carry through gr_chroot_dentry and is_chrooted instead
80381+ of recomputing it here. Already copied when the task struct
80382+ is duplicated. This allows pivot_root to not be treated as
80383+ a chroot
80384+ */
80385+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
80386+
80387 return 0;
80388 }
80389
80390@@ -1197,10 +1247,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
80391 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
80392 #endif
80393 retval = -EAGAIN;
80394+
80395+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
80396+
80397 if (atomic_read(&p->real_cred->user->processes) >=
80398 task_rlimit(p, RLIMIT_NPROC)) {
80399- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
80400- p->real_cred->user != INIT_USER)
80401+ if (p->real_cred->user != INIT_USER &&
80402+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
80403 goto bad_fork_free;
80404 }
80405 current->flags &= ~PF_NPROC_EXCEEDED;
80406@@ -1446,6 +1499,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
80407 goto bad_fork_free_pid;
80408 }
80409
80410+ /* synchronizes with gr_set_acls()
80411+ we need to call this past the point of no return for fork()
80412+ */
80413+ gr_copy_label(p);
80414+
80415 if (clone_flags & CLONE_THREAD) {
80416 current->signal->nr_threads++;
80417 atomic_inc(&current->signal->live);
80418@@ -1529,6 +1587,8 @@ bad_fork_cleanup_count:
80419 bad_fork_free:
80420 free_task(p);
80421 fork_out:
80422+ gr_log_forkfail(retval);
80423+
80424 return ERR_PTR(retval);
80425 }
80426
80427@@ -1613,6 +1673,8 @@ long do_fork(unsigned long clone_flags,
80428 if (clone_flags & CLONE_PARENT_SETTID)
80429 put_user(nr, parent_tidptr);
80430
80431+ gr_handle_brute_check();
80432+
80433 if (clone_flags & CLONE_VFORK) {
80434 p->vfork_done = &vfork;
80435 init_completion(&vfork);
80436@@ -1729,7 +1791,7 @@ void __init proc_caches_init(void)
80437 mm_cachep = kmem_cache_create("mm_struct",
80438 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
80439 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
80440- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
80441+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
80442 mmap_init();
80443 nsproxy_cache_init();
80444 }
80445@@ -1769,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
80446 return 0;
80447
80448 /* don't need lock here; in the worst case we'll do useless copy */
80449- if (fs->users == 1)
80450+ if (atomic_read(&fs->users) == 1)
80451 return 0;
80452
80453 *new_fsp = copy_fs_struct(fs);
80454@@ -1881,7 +1943,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
80455 fs = current->fs;
80456 spin_lock(&fs->lock);
80457 current->fs = new_fs;
80458- if (--fs->users)
80459+ gr_set_chroot_entries(current, &current->fs->root);
80460+ if (atomic_dec_return(&fs->users))
80461 new_fs = NULL;
80462 else
80463 new_fs = fs;
80464diff --git a/kernel/futex.c b/kernel/futex.c
e2b79cd1 80465index 49dacfb..2ac4526 100644
bb5f0bf8
AF
80466--- a/kernel/futex.c
80467+++ b/kernel/futex.c
80468@@ -54,6 +54,7 @@
80469 #include <linux/mount.h>
80470 #include <linux/pagemap.h>
80471 #include <linux/syscalls.h>
80472+#include <linux/ptrace.h>
80473 #include <linux/signal.h>
80474 #include <linux/export.h>
80475 #include <linux/magic.h>
80476@@ -242,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
80477 struct page *page, *page_head;
80478 int err, ro = 0;
80479
80480+#ifdef CONFIG_PAX_SEGMEXEC
80481+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
80482+ return -EFAULT;
80483+#endif
80484+
80485 /*
80486 * The futex address must be "naturally" aligned.
80487 */
e2b79cd1
AF
80488@@ -440,7 +446,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
80489
80490 static int get_futex_value_locked(u32 *dest, u32 __user *from)
80491 {
80492- int ret;
80493+ unsigned long ret;
80494
80495 pagefault_disable();
80496 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
bb5f0bf8
AF
80497@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
80498 {
80499 u32 curval;
80500 int i;
80501+ mm_segment_t oldfs;
80502
80503 /*
80504 * This will fail and we want it. Some arch implementations do
80505@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
80506 * implementation, the non-functional ones will return
80507 * -ENOSYS.
80508 */
80509+ oldfs = get_fs();
80510+ set_fs(USER_DS);
80511 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
80512 futex_cmpxchg_enabled = 1;
80513+ set_fs(oldfs);
80514
80515 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
80516 plist_head_init(&futex_queues[i].chain);
80517diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
80518index f9f44fd..29885e4 100644
80519--- a/kernel/futex_compat.c
80520+++ b/kernel/futex_compat.c
80521@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
80522 return 0;
80523 }
80524
80525-static void __user *futex_uaddr(struct robust_list __user *entry,
80526+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
80527 compat_long_t futex_offset)
80528 {
80529 compat_uptr_t base = ptr_to_compat(entry);
80530diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
80531index 9b22d03..6295b62 100644
80532--- a/kernel/gcov/base.c
80533+++ b/kernel/gcov/base.c
80534@@ -102,11 +102,6 @@ void gcov_enable_events(void)
80535 }
80536
80537 #ifdef CONFIG_MODULES
80538-static inline int within(void *addr, void *start, unsigned long size)
80539-{
80540- return ((addr >= start) && (addr < start + size));
80541-}
80542-
80543 /* Update list and generate events when modules are unloaded. */
80544 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
80545 void *data)
80546@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
80547 prev = NULL;
80548 /* Remove entries located in module from linked list. */
80549 for (info = gcov_info_head; info; info = info->next) {
80550- if (within(info, mod->module_core, mod->core_size)) {
80551+ if (within_module_core_rw((unsigned long)info, mod)) {
80552 if (prev)
80553 prev->next = info->next;
80554 else
80555diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
80556index 2288fbd..0f3941f 100644
80557--- a/kernel/hrtimer.c
80558+++ b/kernel/hrtimer.c
80559@@ -1435,7 +1435,7 @@ void hrtimer_peek_ahead_timers(void)
80560 local_irq_restore(flags);
80561 }
80562
80563-static void run_hrtimer_softirq(struct softirq_action *h)
80564+static void run_hrtimer_softirq(void)
80565 {
80566 hrtimer_peek_ahead_timers();
80567 }
80568@@ -1770,7 +1770,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
80569 return NOTIFY_OK;
80570 }
80571
80572-static struct notifier_block __cpuinitdata hrtimers_nb = {
80573+static struct notifier_block hrtimers_nb = {
80574 .notifier_call = hrtimer_cpu_notify,
80575 };
80576
80577diff --git a/kernel/irq_work.c b/kernel/irq_work.c
80578index 55fcce6..0e4cf34 100644
80579--- a/kernel/irq_work.c
80580+++ b/kernel/irq_work.c
80581@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
80582 return NOTIFY_OK;
80583 }
80584
80585-static struct notifier_block cpu_notify;
80586+static struct notifier_block cpu_notify = {
80587+ .notifier_call = irq_work_cpu_notify,
80588+ .priority = 0,
80589+};
80590
80591 static __init int irq_work_init_cpu_notifier(void)
80592 {
80593- cpu_notify.notifier_call = irq_work_cpu_notify;
80594- cpu_notify.priority = 0;
80595 register_cpu_notifier(&cpu_notify);
80596 return 0;
80597 }
80598diff --git a/kernel/jump_label.c b/kernel/jump_label.c
80599index 60f48fa..7f3a770 100644
80600--- a/kernel/jump_label.c
80601+++ b/kernel/jump_label.c
80602@@ -13,6 +13,7 @@
80603 #include <linux/sort.h>
80604 #include <linux/err.h>
80605 #include <linux/static_key.h>
80606+#include <linux/mm.h>
80607
80608 #ifdef HAVE_JUMP_LABEL
80609
80610@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
80611
80612 size = (((unsigned long)stop - (unsigned long)start)
80613 / sizeof(struct jump_entry));
80614+ pax_open_kernel();
80615 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
80616+ pax_close_kernel();
80617 }
80618
80619 static void jump_label_update(struct static_key *key, int enable);
80620@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
80621 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
80622 struct jump_entry *iter;
80623
80624+ pax_open_kernel();
80625 for (iter = iter_start; iter < iter_stop; iter++) {
80626 if (within_module_init(iter->code, mod))
80627 iter->code = 0;
80628 }
80629+ pax_close_kernel();
80630 }
80631
80632 static int
80633diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
80634index 3127ad5..159d880 100644
80635--- a/kernel/kallsyms.c
80636+++ b/kernel/kallsyms.c
80637@@ -11,6 +11,9 @@
80638 * Changed the compression method from stem compression to "table lookup"
80639 * compression (see scripts/kallsyms.c for a more complete description)
80640 */
80641+#ifdef CONFIG_GRKERNSEC_HIDESYM
80642+#define __INCLUDED_BY_HIDESYM 1
80643+#endif
80644 #include <linux/kallsyms.h>
80645 #include <linux/module.h>
80646 #include <linux/init.h>
80647@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
80648
80649 static inline int is_kernel_inittext(unsigned long addr)
80650 {
80651+ if (system_state != SYSTEM_BOOTING)
80652+ return 0;
80653+
80654 if (addr >= (unsigned long)_sinittext
80655 && addr <= (unsigned long)_einittext)
80656 return 1;
80657 return 0;
80658 }
80659
80660+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80661+#ifdef CONFIG_MODULES
80662+static inline int is_module_text(unsigned long addr)
80663+{
80664+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
80665+ return 1;
80666+
80667+ addr = ktla_ktva(addr);
80668+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
80669+}
80670+#else
80671+static inline int is_module_text(unsigned long addr)
80672+{
80673+ return 0;
80674+}
80675+#endif
80676+#endif
80677+
80678 static inline int is_kernel_text(unsigned long addr)
80679 {
80680 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
80681@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
80682
80683 static inline int is_kernel(unsigned long addr)
80684 {
80685+
80686+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80687+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
80688+ return 1;
80689+
80690+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
80691+#else
80692 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
80693+#endif
80694+
80695 return 1;
80696 return in_gate_area_no_mm(addr);
80697 }
80698
80699 static int is_ksym_addr(unsigned long addr)
80700 {
80701+
80702+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80703+ if (is_module_text(addr))
80704+ return 0;
80705+#endif
80706+
80707 if (all_var)
80708 return is_kernel(addr);
80709
80710@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
80711
80712 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
80713 {
80714- iter->name[0] = '\0';
80715 iter->nameoff = get_symbol_offset(new_pos);
80716 iter->pos = new_pos;
80717 }
80718@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
80719 {
80720 struct kallsym_iter *iter = m->private;
80721
80722+#ifdef CONFIG_GRKERNSEC_HIDESYM
80723+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
80724+ return 0;
80725+#endif
80726+
80727 /* Some debugging symbols have no name. Ignore them. */
80728 if (!iter->name[0])
80729 return 0;
80730@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
80731 */
80732 type = iter->exported ? toupper(iter->type) :
80733 tolower(iter->type);
80734+
80735 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
80736 type, iter->name, iter->module_name);
80737 } else
80738@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
80739 struct kallsym_iter *iter;
80740 int ret;
80741
80742- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
80743+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
80744 if (!iter)
80745 return -ENOMEM;
80746 reset_iter(iter, 0);
80747diff --git a/kernel/kcmp.c b/kernel/kcmp.c
80748index e30ac0f..3528cac 100644
80749--- a/kernel/kcmp.c
80750+++ b/kernel/kcmp.c
80751@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
80752 struct task_struct *task1, *task2;
80753 int ret;
80754
80755+#ifdef CONFIG_GRKERNSEC
80756+ return -ENOSYS;
80757+#endif
80758+
80759 rcu_read_lock();
80760
80761 /*
80762diff --git a/kernel/kexec.c b/kernel/kexec.c
80763index 59f7b55..4022f65 100644
80764--- a/kernel/kexec.c
80765+++ b/kernel/kexec.c
80766@@ -1041,7 +1041,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
80767 unsigned long flags)
80768 {
80769 struct compat_kexec_segment in;
80770- struct kexec_segment out, __user *ksegments;
80771+ struct kexec_segment out;
80772+ struct kexec_segment __user *ksegments;
80773 unsigned long i, result;
80774
80775 /* Don't allow clients that don't understand the native
80776diff --git a/kernel/kmod.c b/kernel/kmod.c
80777index 8241906..d625f2c 100644
80778--- a/kernel/kmod.c
80779+++ b/kernel/kmod.c
80780@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
80781 kfree(info->argv);
80782 }
80783
80784-static int call_modprobe(char *module_name, int wait)
80785+static int call_modprobe(char *module_name, char *module_param, int wait)
80786 {
80787 struct subprocess_info *info;
80788 static char *envp[] = {
80789@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
80790 NULL
80791 };
80792
80793- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
80794+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
80795 if (!argv)
80796 goto out;
80797
80798@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
80799 argv[1] = "-q";
80800 argv[2] = "--";
80801 argv[3] = module_name; /* check free_modprobe_argv() */
80802- argv[4] = NULL;
80803+ argv[4] = module_param;
80804+ argv[5] = NULL;
80805
80806 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
80807 NULL, free_modprobe_argv, NULL);
80808@@ -129,9 +130,8 @@ out:
80809 * If module auto-loading support is disabled then this function
80810 * becomes a no-operation.
80811 */
80812-int __request_module(bool wait, const char *fmt, ...)
80813+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
80814 {
80815- va_list args;
80816 char module_name[MODULE_NAME_LEN];
80817 unsigned int max_modprobes;
80818 int ret;
80819@@ -147,9 +147,7 @@ int __request_module(bool wait, const char *fmt, ...)
80820 */
80821 WARN_ON_ONCE(wait && current_is_async());
80822
80823- va_start(args, fmt);
80824- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
80825- va_end(args);
80826+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
80827 if (ret >= MODULE_NAME_LEN)
80828 return -ENAMETOOLONG;
80829
80830@@ -157,6 +155,20 @@ int __request_module(bool wait, const char *fmt, ...)
80831 if (ret)
80832 return ret;
80833
80834+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80835+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
80836+ /* hack to workaround consolekit/udisks stupidity */
80837+ read_lock(&tasklist_lock);
80838+ if (!strcmp(current->comm, "mount") &&
80839+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
80840+ read_unlock(&tasklist_lock);
80841+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
80842+ return -EPERM;
80843+ }
80844+ read_unlock(&tasklist_lock);
80845+ }
80846+#endif
80847+
80848 /* If modprobe needs a service that is in a module, we get a recursive
80849 * loop. Limit the number of running kmod threads to max_threads/2 or
80850 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
80851@@ -185,11 +197,52 @@ int __request_module(bool wait, const char *fmt, ...)
80852
80853 trace_module_request(module_name, wait, _RET_IP_);
80854
80855- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
80856+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
80857
80858 atomic_dec(&kmod_concurrent);
80859 return ret;
80860 }
80861+
80862+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
80863+{
80864+ va_list args;
80865+ int ret;
80866+
80867+ va_start(args, fmt);
80868+ ret = ____request_module(wait, module_param, fmt, args);
80869+ va_end(args);
80870+
80871+ return ret;
80872+}
80873+
80874+int __request_module(bool wait, const char *fmt, ...)
80875+{
80876+ va_list args;
80877+ int ret;
80878+
80879+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80880+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
80881+ char module_param[MODULE_NAME_LEN];
80882+
80883+ memset(module_param, 0, sizeof(module_param));
80884+
80885+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
80886+
80887+ va_start(args, fmt);
80888+ ret = ____request_module(wait, module_param, fmt, args);
80889+ va_end(args);
80890+
80891+ return ret;
80892+ }
80893+#endif
80894+
80895+ va_start(args, fmt);
80896+ ret = ____request_module(wait, NULL, fmt, args);
80897+ va_end(args);
80898+
80899+ return ret;
80900+}
80901+
80902 EXPORT_SYMBOL(__request_module);
80903 #endif /* CONFIG_MODULES */
80904
80905@@ -300,7 +353,7 @@ static int wait_for_helper(void *data)
80906 *
80907 * Thus the __user pointer cast is valid here.
80908 */
80909- sys_wait4(pid, (int __user *)&ret, 0, NULL);
80910+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
80911
80912 /*
80913 * If ret is 0, either ____call_usermodehelper failed and the
80914@@ -651,7 +704,7 @@ EXPORT_SYMBOL(call_usermodehelper);
80915 static int proc_cap_handler(struct ctl_table *table, int write,
80916 void __user *buffer, size_t *lenp, loff_t *ppos)
80917 {
80918- struct ctl_table t;
80919+ ctl_table_no_const t;
80920 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
80921 kernel_cap_t new_cap;
80922 int err, i;
80923diff --git a/kernel/kprobes.c b/kernel/kprobes.c
80924index bddf3b2..233bf40 100644
80925--- a/kernel/kprobes.c
80926+++ b/kernel/kprobes.c
80927@@ -31,6 +31,9 @@
80928 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
80929 * <prasanna@in.ibm.com> added function-return probes.
80930 */
80931+#ifdef CONFIG_GRKERNSEC_HIDESYM
80932+#define __INCLUDED_BY_HIDESYM 1
80933+#endif
80934 #include <linux/kprobes.h>
80935 #include <linux/hash.h>
80936 #include <linux/init.h>
80937@@ -185,7 +188,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
80938 * kernel image and loaded module images reside. This is required
80939 * so x86_64 can correctly handle the %rip-relative fixups.
80940 */
80941- kip->insns = module_alloc(PAGE_SIZE);
80942+ kip->insns = module_alloc_exec(PAGE_SIZE);
80943 if (!kip->insns) {
80944 kfree(kip);
80945 return NULL;
80946@@ -225,7 +228,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
80947 */
80948 if (!list_is_singular(&kip->list)) {
80949 list_del(&kip->list);
80950- module_free(NULL, kip->insns);
80951+ module_free_exec(NULL, kip->insns);
80952 kfree(kip);
80953 }
80954 return 1;
80955@@ -2083,7 +2086,7 @@ static int __init init_kprobes(void)
80956 {
80957 int i, err = 0;
80958 unsigned long offset = 0, size = 0;
80959- char *modname, namebuf[128];
80960+ char *modname, namebuf[KSYM_NAME_LEN];
80961 const char *symbol_name;
80962 void *addr;
80963 struct kprobe_blackpoint *kb;
80964@@ -2168,11 +2171,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
80965 kprobe_type = "k";
80966
80967 if (sym)
80968- seq_printf(pi, "%p %s %s+0x%x %s ",
80969+ seq_printf(pi, "%pK %s %s+0x%x %s ",
80970 p->addr, kprobe_type, sym, offset,
80971 (modname ? modname : " "));
80972 else
80973- seq_printf(pi, "%p %s %p ",
80974+ seq_printf(pi, "%pK %s %pK ",
80975 p->addr, kprobe_type, p->addr);
80976
80977 if (!pp)
80978@@ -2209,7 +2212,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
80979 const char *sym = NULL;
80980 unsigned int i = *(loff_t *) v;
80981 unsigned long offset = 0;
80982- char *modname, namebuf[128];
80983+ char *modname, namebuf[KSYM_NAME_LEN];
80984
80985 head = &kprobe_table[i];
80986 preempt_disable();
80987diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
80988index 6ada93c..dce7d5d 100644
80989--- a/kernel/ksysfs.c
80990+++ b/kernel/ksysfs.c
80991@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
80992 {
80993 if (count+1 > UEVENT_HELPER_PATH_LEN)
80994 return -ENOENT;
80995+ if (!capable(CAP_SYS_ADMIN))
80996+ return -EPERM;
80997 memcpy(uevent_helper, buf, count);
80998 uevent_helper[count] = '\0';
80999 if (count && uevent_helper[count-1] == '\n')
81000@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
81001 return count;
81002 }
81003
81004-static struct bin_attribute notes_attr = {
81005+static bin_attribute_no_const notes_attr __read_only = {
81006 .attr = {
81007 .name = "notes",
81008 .mode = S_IRUGO,
81009diff --git a/kernel/lockdep.c b/kernel/lockdep.c
81010index 1f3186b..bb7dbc6 100644
81011--- a/kernel/lockdep.c
81012+++ b/kernel/lockdep.c
81013@@ -596,6 +596,10 @@ static int static_obj(void *obj)
81014 end = (unsigned long) &_end,
81015 addr = (unsigned long) obj;
81016
81017+#ifdef CONFIG_PAX_KERNEXEC
81018+ start = ktla_ktva(start);
81019+#endif
81020+
81021 /*
81022 * static variable?
81023 */
81024@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
81025 if (!static_obj(lock->key)) {
81026 debug_locks_off();
81027 printk("INFO: trying to register non-static key.\n");
81028+ printk("lock:%pS key:%pS.\n", lock, lock->key);
81029 printk("the code is fine but needs lockdep annotation.\n");
81030 printk("turning off the locking correctness validator.\n");
81031 dump_stack();
81032@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
81033 if (!class)
81034 return 0;
81035 }
81036- atomic_inc((atomic_t *)&class->ops);
81037+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
81038 if (very_verbose(class)) {
81039 printk("\nacquire class [%p] %s", class->key, class->name);
81040 if (class->name_version > 1)
81041diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
81042index b2c71c5..7b88d63 100644
81043--- a/kernel/lockdep_proc.c
81044+++ b/kernel/lockdep_proc.c
81045@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
81046 return 0;
81047 }
81048
81049- seq_printf(m, "%p", class->key);
81050+ seq_printf(m, "%pK", class->key);
81051 #ifdef CONFIG_DEBUG_LOCKDEP
81052 seq_printf(m, " OPS:%8ld", class->ops);
81053 #endif
81054@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
81055
81056 list_for_each_entry(entry, &class->locks_after, entry) {
81057 if (entry->distance == 1) {
81058- seq_printf(m, " -> [%p] ", entry->class->key);
81059+ seq_printf(m, " -> [%pK] ", entry->class->key);
81060 print_name(m, entry->class);
81061 seq_puts(m, "\n");
81062 }
81063@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
81064 if (!class->key)
81065 continue;
81066
81067- seq_printf(m, "[%p] ", class->key);
81068+ seq_printf(m, "[%pK] ", class->key);
81069 print_name(m, class);
81070 seq_puts(m, "\n");
81071 }
81072@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
81073 if (!i)
81074 seq_line(m, '-', 40-namelen, namelen);
81075
81076- snprintf(ip, sizeof(ip), "[<%p>]",
81077+ snprintf(ip, sizeof(ip), "[<%pK>]",
81078 (void *)class->contention_point[i]);
81079 seq_printf(m, "%40s %14lu %29s %pS\n",
81080 name, stats->contention_point[i],
81081@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
81082 if (!i)
81083 seq_line(m, '-', 40-namelen, namelen);
81084
81085- snprintf(ip, sizeof(ip), "[<%p>]",
81086+ snprintf(ip, sizeof(ip), "[<%pK>]",
81087 (void *)class->contending_point[i]);
81088 seq_printf(m, "%40s %14lu %29s %pS\n",
81089 name, stats->contending_point[i],
81090diff --git a/kernel/module.c b/kernel/module.c
81091index fa53db8..6f17200 100644
81092--- a/kernel/module.c
81093+++ b/kernel/module.c
81094@@ -61,6 +61,7 @@
81095 #include <linux/pfn.h>
81096 #include <linux/bsearch.h>
81097 #include <linux/fips.h>
81098+#include <linux/grsecurity.h>
81099 #include <uapi/linux/module.h>
81100 #include "module-internal.h"
81101
81102@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
81103
81104 /* Bounds of module allocation, for speeding __module_address.
81105 * Protected by module_mutex. */
81106-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
81107+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
81108+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
81109
81110 int register_module_notifier(struct notifier_block * nb)
81111 {
81112@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
81113 return true;
81114
81115 list_for_each_entry_rcu(mod, &modules, list) {
81116- struct symsearch arr[] = {
81117+ struct symsearch modarr[] = {
81118 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
81119 NOT_GPL_ONLY, false },
81120 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
81121@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
81122 if (mod->state == MODULE_STATE_UNFORMED)
81123 continue;
81124
81125- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
81126+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
81127 return true;
81128 }
81129 return false;
81130@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
81131 static int percpu_modalloc(struct module *mod,
81132 unsigned long size, unsigned long align)
81133 {
81134- if (align > PAGE_SIZE) {
81135+ if (align-1 >= PAGE_SIZE) {
81136 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
81137 mod->name, align, PAGE_SIZE);
81138 align = PAGE_SIZE;
81139@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
81140 static ssize_t show_coresize(struct module_attribute *mattr,
81141 struct module_kobject *mk, char *buffer)
81142 {
81143- return sprintf(buffer, "%u\n", mk->mod->core_size);
81144+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
81145 }
81146
81147 static struct module_attribute modinfo_coresize =
81148@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
81149 static ssize_t show_initsize(struct module_attribute *mattr,
81150 struct module_kobject *mk, char *buffer)
81151 {
81152- return sprintf(buffer, "%u\n", mk->mod->init_size);
81153+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
81154 }
81155
81156 static struct module_attribute modinfo_initsize =
81157@@ -1313,7 +1315,7 @@ resolve_symbol_wait(struct module *mod,
81158 */
81159 #ifdef CONFIG_SYSFS
81160
81161-#ifdef CONFIG_KALLSYMS
81162+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
81163 static inline bool sect_empty(const Elf_Shdr *sect)
81164 {
81165 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
81166@@ -1453,7 +1455,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
81167 {
81168 unsigned int notes, loaded, i;
81169 struct module_notes_attrs *notes_attrs;
81170- struct bin_attribute *nattr;
81171+ bin_attribute_no_const *nattr;
81172
81173 /* failed to create section attributes, so can't create notes */
81174 if (!mod->sect_attrs)
81175@@ -1565,7 +1567,7 @@ static void del_usage_links(struct module *mod)
81176 static int module_add_modinfo_attrs(struct module *mod)
81177 {
81178 struct module_attribute *attr;
81179- struct module_attribute *temp_attr;
81180+ module_attribute_no_const *temp_attr;
81181 int error = 0;
81182 int i;
81183
81184@@ -1779,21 +1781,21 @@ static void set_section_ro_nx(void *base,
81185
81186 static void unset_module_core_ro_nx(struct module *mod)
81187 {
81188- set_page_attributes(mod->module_core + mod->core_text_size,
81189- mod->module_core + mod->core_size,
81190+ set_page_attributes(mod->module_core_rw,
81191+ mod->module_core_rw + mod->core_size_rw,
81192 set_memory_x);
81193- set_page_attributes(mod->module_core,
81194- mod->module_core + mod->core_ro_size,
81195+ set_page_attributes(mod->module_core_rx,
81196+ mod->module_core_rx + mod->core_size_rx,
81197 set_memory_rw);
81198 }
81199
81200 static void unset_module_init_ro_nx(struct module *mod)
81201 {
81202- set_page_attributes(mod->module_init + mod->init_text_size,
81203- mod->module_init + mod->init_size,
81204+ set_page_attributes(mod->module_init_rw,
81205+ mod->module_init_rw + mod->init_size_rw,
81206 set_memory_x);
81207- set_page_attributes(mod->module_init,
81208- mod->module_init + mod->init_ro_size,
81209+ set_page_attributes(mod->module_init_rx,
81210+ mod->module_init_rx + mod->init_size_rx,
81211 set_memory_rw);
81212 }
81213
81214@@ -1806,14 +1808,14 @@ void set_all_modules_text_rw(void)
81215 list_for_each_entry_rcu(mod, &modules, list) {
81216 if (mod->state == MODULE_STATE_UNFORMED)
81217 continue;
81218- if ((mod->module_core) && (mod->core_text_size)) {
81219- set_page_attributes(mod->module_core,
81220- mod->module_core + mod->core_text_size,
81221+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
81222+ set_page_attributes(mod->module_core_rx,
81223+ mod->module_core_rx + mod->core_size_rx,
81224 set_memory_rw);
81225 }
81226- if ((mod->module_init) && (mod->init_text_size)) {
81227- set_page_attributes(mod->module_init,
81228- mod->module_init + mod->init_text_size,
81229+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
81230+ set_page_attributes(mod->module_init_rx,
81231+ mod->module_init_rx + mod->init_size_rx,
81232 set_memory_rw);
81233 }
81234 }
81235@@ -1829,14 +1831,14 @@ void set_all_modules_text_ro(void)
81236 list_for_each_entry_rcu(mod, &modules, list) {
81237 if (mod->state == MODULE_STATE_UNFORMED)
81238 continue;
81239- if ((mod->module_core) && (mod->core_text_size)) {
81240- set_page_attributes(mod->module_core,
81241- mod->module_core + mod->core_text_size,
81242+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
81243+ set_page_attributes(mod->module_core_rx,
81244+ mod->module_core_rx + mod->core_size_rx,
81245 set_memory_ro);
81246 }
81247- if ((mod->module_init) && (mod->init_text_size)) {
81248- set_page_attributes(mod->module_init,
81249- mod->module_init + mod->init_text_size,
81250+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
81251+ set_page_attributes(mod->module_init_rx,
81252+ mod->module_init_rx + mod->init_size_rx,
81253 set_memory_ro);
81254 }
81255 }
81256@@ -1887,16 +1889,19 @@ static void free_module(struct module *mod)
81257
81258 /* This may be NULL, but that's OK */
81259 unset_module_init_ro_nx(mod);
81260- module_free(mod, mod->module_init);
81261+ module_free(mod, mod->module_init_rw);
81262+ module_free_exec(mod, mod->module_init_rx);
81263 kfree(mod->args);
81264 percpu_modfree(mod);
81265
81266 /* Free lock-classes: */
81267- lockdep_free_key_range(mod->module_core, mod->core_size);
81268+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
81269+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
81270
81271 /* Finally, free the core (containing the module structure) */
81272 unset_module_core_ro_nx(mod);
81273- module_free(mod, mod->module_core);
81274+ module_free_exec(mod, mod->module_core_rx);
81275+ module_free(mod, mod->module_core_rw);
81276
81277 #ifdef CONFIG_MPU
81278 update_protections(current->mm);
81279@@ -1966,9 +1971,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81280 int ret = 0;
81281 const struct kernel_symbol *ksym;
81282
81283+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81284+ int is_fs_load = 0;
81285+ int register_filesystem_found = 0;
81286+ char *p;
81287+
81288+ p = strstr(mod->args, "grsec_modharden_fs");
81289+ if (p) {
81290+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
81291+ /* copy \0 as well */
81292+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
81293+ is_fs_load = 1;
81294+ }
81295+#endif
81296+
81297 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
81298 const char *name = info->strtab + sym[i].st_name;
81299
81300+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81301+ /* it's a real shame this will never get ripped and copied
81302+ upstream! ;(
81303+ */
81304+ if (is_fs_load && !strcmp(name, "register_filesystem"))
81305+ register_filesystem_found = 1;
81306+#endif
81307+
81308 switch (sym[i].st_shndx) {
81309 case SHN_COMMON:
81310 /* We compiled with -fno-common. These are not
81311@@ -1989,7 +2016,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81312 ksym = resolve_symbol_wait(mod, info, name);
81313 /* Ok if resolved. */
81314 if (ksym && !IS_ERR(ksym)) {
81315+ pax_open_kernel();
81316 sym[i].st_value = ksym->value;
81317+ pax_close_kernel();
81318 break;
81319 }
81320
81321@@ -2008,11 +2037,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81322 secbase = (unsigned long)mod_percpu(mod);
81323 else
81324 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
81325+ pax_open_kernel();
81326 sym[i].st_value += secbase;
81327+ pax_close_kernel();
81328 break;
81329 }
81330 }
81331
81332+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81333+ if (is_fs_load && !register_filesystem_found) {
81334+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
81335+ ret = -EPERM;
81336+ }
81337+#endif
81338+
81339 return ret;
81340 }
81341
81342@@ -2096,22 +2134,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
81343 || s->sh_entsize != ~0UL
81344 || strstarts(sname, ".init"))
81345 continue;
81346- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
81347+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
81348+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
81349+ else
81350+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
81351 pr_debug("\t%s\n", sname);
81352 }
81353- switch (m) {
81354- case 0: /* executable */
81355- mod->core_size = debug_align(mod->core_size);
81356- mod->core_text_size = mod->core_size;
81357- break;
81358- case 1: /* RO: text and ro-data */
81359- mod->core_size = debug_align(mod->core_size);
81360- mod->core_ro_size = mod->core_size;
81361- break;
81362- case 3: /* whole core */
81363- mod->core_size = debug_align(mod->core_size);
81364- break;
81365- }
81366 }
81367
81368 pr_debug("Init section allocation order:\n");
81369@@ -2125,23 +2153,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
81370 || s->sh_entsize != ~0UL
81371 || !strstarts(sname, ".init"))
81372 continue;
81373- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
81374- | INIT_OFFSET_MASK);
81375+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
81376+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
81377+ else
81378+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
81379+ s->sh_entsize |= INIT_OFFSET_MASK;
81380 pr_debug("\t%s\n", sname);
81381 }
81382- switch (m) {
81383- case 0: /* executable */
81384- mod->init_size = debug_align(mod->init_size);
81385- mod->init_text_size = mod->init_size;
81386- break;
81387- case 1: /* RO: text and ro-data */
81388- mod->init_size = debug_align(mod->init_size);
81389- mod->init_ro_size = mod->init_size;
81390- break;
81391- case 3: /* whole init */
81392- mod->init_size = debug_align(mod->init_size);
81393- break;
81394- }
81395 }
81396 }
81397
81398@@ -2314,7 +2332,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
81399
81400 /* Put symbol section at end of init part of module. */
81401 symsect->sh_flags |= SHF_ALLOC;
81402- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
81403+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
81404 info->index.sym) | INIT_OFFSET_MASK;
81405 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
81406
81407@@ -2331,13 +2349,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
81408 }
81409
81410 /* Append room for core symbols at end of core part. */
81411- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
81412- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
81413- mod->core_size += strtab_size;
81414+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
81415+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
81416+ mod->core_size_rx += strtab_size;
81417
81418 /* Put string table section at end of init part of module. */
81419 strsect->sh_flags |= SHF_ALLOC;
81420- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
81421+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
81422 info->index.str) | INIT_OFFSET_MASK;
81423 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
81424 }
81425@@ -2355,12 +2373,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
81426 /* Make sure we get permanent strtab: don't use info->strtab. */
81427 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
81428
81429+ pax_open_kernel();
81430+
81431 /* Set types up while we still have access to sections. */
81432 for (i = 0; i < mod->num_symtab; i++)
81433 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
81434
81435- mod->core_symtab = dst = mod->module_core + info->symoffs;
81436- mod->core_strtab = s = mod->module_core + info->stroffs;
81437+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
81438+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
81439 src = mod->symtab;
81440 for (ndst = i = 0; i < mod->num_symtab; i++) {
81441 if (i == 0 ||
81442@@ -2372,6 +2392,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
81443 }
81444 }
81445 mod->core_num_syms = ndst;
81446+
81447+ pax_close_kernel();
81448 }
81449 #else
81450 static inline void layout_symtab(struct module *mod, struct load_info *info)
81451@@ -2405,17 +2427,33 @@ void * __weak module_alloc(unsigned long size)
81452 return vmalloc_exec(size);
81453 }
81454
81455-static void *module_alloc_update_bounds(unsigned long size)
81456+static void *module_alloc_update_bounds_rw(unsigned long size)
81457 {
81458 void *ret = module_alloc(size);
81459
81460 if (ret) {
81461 mutex_lock(&module_mutex);
81462 /* Update module bounds. */
81463- if ((unsigned long)ret < module_addr_min)
81464- module_addr_min = (unsigned long)ret;
81465- if ((unsigned long)ret + size > module_addr_max)
81466- module_addr_max = (unsigned long)ret + size;
81467+ if ((unsigned long)ret < module_addr_min_rw)
81468+ module_addr_min_rw = (unsigned long)ret;
81469+ if ((unsigned long)ret + size > module_addr_max_rw)
81470+ module_addr_max_rw = (unsigned long)ret + size;
81471+ mutex_unlock(&module_mutex);
81472+ }
81473+ return ret;
81474+}
81475+
81476+static void *module_alloc_update_bounds_rx(unsigned long size)
81477+{
81478+ void *ret = module_alloc_exec(size);
81479+
81480+ if (ret) {
81481+ mutex_lock(&module_mutex);
81482+ /* Update module bounds. */
81483+ if ((unsigned long)ret < module_addr_min_rx)
81484+ module_addr_min_rx = (unsigned long)ret;
81485+ if ((unsigned long)ret + size > module_addr_max_rx)
81486+ module_addr_max_rx = (unsigned long)ret + size;
81487 mutex_unlock(&module_mutex);
81488 }
81489 return ret;
81490@@ -2691,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
81491 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
81492 {
81493 const char *modmagic = get_modinfo(info, "vermagic");
81494+ const char *license = get_modinfo(info, "license");
81495 int err;
81496
81497+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
81498+ if (!license || !license_is_gpl_compatible(license))
81499+ return -ENOEXEC;
81500+#endif
81501+
81502 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
81503 modmagic = NULL;
81504
81505@@ -2718,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
81506 }
81507
81508 /* Set up license info based on the info section */
81509- set_license(mod, get_modinfo(info, "license"));
81510+ set_license(mod, license);
81511
81512 return 0;
81513 }
81514@@ -2799,7 +2843,7 @@ static int move_module(struct module *mod, struct load_info *info)
81515 void *ptr;
81516
81517 /* Do the allocs. */
81518- ptr = module_alloc_update_bounds(mod->core_size);
81519+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
81520 /*
81521 * The pointer to this block is stored in the module structure
81522 * which is inside the block. Just mark it as not being a
81523@@ -2809,11 +2853,11 @@ static int move_module(struct module *mod, struct load_info *info)
81524 if (!ptr)
81525 return -ENOMEM;
81526
81527- memset(ptr, 0, mod->core_size);
81528- mod->module_core = ptr;
81529+ memset(ptr, 0, mod->core_size_rw);
81530+ mod->module_core_rw = ptr;
81531
81532- if (mod->init_size) {
81533- ptr = module_alloc_update_bounds(mod->init_size);
81534+ if (mod->init_size_rw) {
81535+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
81536 /*
81537 * The pointer to this block is stored in the module structure
81538 * which is inside the block. This block doesn't need to be
81539@@ -2822,13 +2866,45 @@ static int move_module(struct module *mod, struct load_info *info)
81540 */
81541 kmemleak_ignore(ptr);
81542 if (!ptr) {
81543- module_free(mod, mod->module_core);
81544+ module_free(mod, mod->module_core_rw);
81545 return -ENOMEM;
81546 }
81547- memset(ptr, 0, mod->init_size);
81548- mod->module_init = ptr;
81549+ memset(ptr, 0, mod->init_size_rw);
81550+ mod->module_init_rw = ptr;
81551 } else
81552- mod->module_init = NULL;
81553+ mod->module_init_rw = NULL;
81554+
81555+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
81556+ kmemleak_not_leak(ptr);
81557+ if (!ptr) {
81558+ if (mod->module_init_rw)
81559+ module_free(mod, mod->module_init_rw);
81560+ module_free(mod, mod->module_core_rw);
81561+ return -ENOMEM;
81562+ }
81563+
81564+ pax_open_kernel();
81565+ memset(ptr, 0, mod->core_size_rx);
81566+ pax_close_kernel();
81567+ mod->module_core_rx = ptr;
81568+
81569+ if (mod->init_size_rx) {
81570+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
81571+ kmemleak_ignore(ptr);
81572+ if (!ptr && mod->init_size_rx) {
81573+ module_free_exec(mod, mod->module_core_rx);
81574+ if (mod->module_init_rw)
81575+ module_free(mod, mod->module_init_rw);
81576+ module_free(mod, mod->module_core_rw);
81577+ return -ENOMEM;
81578+ }
81579+
81580+ pax_open_kernel();
81581+ memset(ptr, 0, mod->init_size_rx);
81582+ pax_close_kernel();
81583+ mod->module_init_rx = ptr;
81584+ } else
81585+ mod->module_init_rx = NULL;
81586
81587 /* Transfer each section which specifies SHF_ALLOC */
81588 pr_debug("final section addresses:\n");
81589@@ -2839,16 +2915,45 @@ static int move_module(struct module *mod, struct load_info *info)
81590 if (!(shdr->sh_flags & SHF_ALLOC))
81591 continue;
81592
81593- if (shdr->sh_entsize & INIT_OFFSET_MASK)
81594- dest = mod->module_init
81595- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81596- else
81597- dest = mod->module_core + shdr->sh_entsize;
81598+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
81599+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
81600+ dest = mod->module_init_rw
81601+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81602+ else
81603+ dest = mod->module_init_rx
81604+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81605+ } else {
81606+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
81607+ dest = mod->module_core_rw + shdr->sh_entsize;
81608+ else
81609+ dest = mod->module_core_rx + shdr->sh_entsize;
81610+ }
81611+
81612+ if (shdr->sh_type != SHT_NOBITS) {
81613+
81614+#ifdef CONFIG_PAX_KERNEXEC
81615+#ifdef CONFIG_X86_64
81616+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
81617+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
81618+#endif
81619+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
81620+ pax_open_kernel();
81621+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
81622+ pax_close_kernel();
81623+ } else
81624+#endif
81625
81626- if (shdr->sh_type != SHT_NOBITS)
81627 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
81628+ }
81629 /* Update sh_addr to point to copy in image. */
81630- shdr->sh_addr = (unsigned long)dest;
81631+
81632+#ifdef CONFIG_PAX_KERNEXEC
81633+ if (shdr->sh_flags & SHF_EXECINSTR)
81634+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
81635+ else
81636+#endif
81637+
81638+ shdr->sh_addr = (unsigned long)dest;
81639 pr_debug("\t0x%lx %s\n",
81640 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
81641 }
81642@@ -2905,12 +3010,12 @@ static void flush_module_icache(const struct module *mod)
81643 * Do it before processing of module parameters, so the module
81644 * can provide parameter accessor functions of its own.
81645 */
81646- if (mod->module_init)
81647- flush_icache_range((unsigned long)mod->module_init,
81648- (unsigned long)mod->module_init
81649- + mod->init_size);
81650- flush_icache_range((unsigned long)mod->module_core,
81651- (unsigned long)mod->module_core + mod->core_size);
81652+ if (mod->module_init_rx)
81653+ flush_icache_range((unsigned long)mod->module_init_rx,
81654+ (unsigned long)mod->module_init_rx
81655+ + mod->init_size_rx);
81656+ flush_icache_range((unsigned long)mod->module_core_rx,
81657+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
81658
81659 set_fs(old_fs);
81660 }
81661@@ -2977,8 +3082,10 @@ static int alloc_module_percpu(struct module *mod, struct load_info *info)
81662 static void module_deallocate(struct module *mod, struct load_info *info)
81663 {
81664 percpu_modfree(mod);
81665- module_free(mod, mod->module_init);
81666- module_free(mod, mod->module_core);
81667+ module_free_exec(mod, mod->module_init_rx);
81668+ module_free_exec(mod, mod->module_core_rx);
81669+ module_free(mod, mod->module_init_rw);
81670+ module_free(mod, mod->module_core_rw);
81671 }
81672
81673 int __weak module_finalize(const Elf_Ehdr *hdr,
81674@@ -2991,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
81675 static int post_relocation(struct module *mod, const struct load_info *info)
81676 {
81677 /* Sort exception table now relocations are done. */
81678+ pax_open_kernel();
81679 sort_extable(mod->extable, mod->extable + mod->num_exentries);
81680+ pax_close_kernel();
81681
81682 /* Copy relocated percpu area over. */
81683 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
81684@@ -3045,16 +3154,16 @@ static int do_init_module(struct module *mod)
81685 MODULE_STATE_COMING, mod);
81686
81687 /* Set RO and NX regions for core */
81688- set_section_ro_nx(mod->module_core,
81689- mod->core_text_size,
81690- mod->core_ro_size,
81691- mod->core_size);
81692+ set_section_ro_nx(mod->module_core_rx,
81693+ mod->core_size_rx,
81694+ mod->core_size_rx,
81695+ mod->core_size_rx);
81696
81697 /* Set RO and NX regions for init */
81698- set_section_ro_nx(mod->module_init,
81699- mod->init_text_size,
81700- mod->init_ro_size,
81701- mod->init_size);
81702+ set_section_ro_nx(mod->module_init_rx,
81703+ mod->init_size_rx,
81704+ mod->init_size_rx,
81705+ mod->init_size_rx);
81706
81707 do_mod_ctors(mod);
81708 /* Start the module */
81709@@ -3116,11 +3225,12 @@ static int do_init_module(struct module *mod)
81710 mod->strtab = mod->core_strtab;
81711 #endif
81712 unset_module_init_ro_nx(mod);
81713- module_free(mod, mod->module_init);
81714- mod->module_init = NULL;
81715- mod->init_size = 0;
81716- mod->init_ro_size = 0;
81717- mod->init_text_size = 0;
81718+ module_free(mod, mod->module_init_rw);
81719+ module_free_exec(mod, mod->module_init_rx);
81720+ mod->module_init_rw = NULL;
81721+ mod->module_init_rx = NULL;
81722+ mod->init_size_rw = 0;
81723+ mod->init_size_rx = 0;
81724 mutex_unlock(&module_mutex);
81725 wake_up_all(&module_wq);
81726
81727@@ -3252,9 +3362,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
81728 if (err)
81729 goto free_unload;
81730
81731+ /* Now copy in args */
81732+ mod->args = strndup_user(uargs, ~0UL >> 1);
81733+ if (IS_ERR(mod->args)) {
81734+ err = PTR_ERR(mod->args);
81735+ goto free_unload;
81736+ }
81737+
81738 /* Set up MODINFO_ATTR fields */
81739 setup_modinfo(mod, info);
81740
81741+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81742+ {
81743+ char *p, *p2;
81744+
81745+ if (strstr(mod->args, "grsec_modharden_netdev")) {
81746+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
81747+ err = -EPERM;
81748+ goto free_modinfo;
81749+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
81750+ p += sizeof("grsec_modharden_normal") - 1;
81751+ p2 = strstr(p, "_");
81752+ if (p2) {
81753+ *p2 = '\0';
81754+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
81755+ *p2 = '_';
81756+ }
81757+ err = -EPERM;
81758+ goto free_modinfo;
81759+ }
81760+ }
81761+#endif
81762+
81763 /* Fix up syms, so that st_value is a pointer to location. */
81764 err = simplify_symbols(mod, info);
81765 if (err < 0)
81766@@ -3270,13 +3409,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
81767
81768 flush_module_icache(mod);
81769
81770- /* Now copy in args */
81771- mod->args = strndup_user(uargs, ~0UL >> 1);
81772- if (IS_ERR(mod->args)) {
81773- err = PTR_ERR(mod->args);
81774- goto free_arch_cleanup;
81775- }
81776-
81777 dynamic_debug_setup(info->debug, info->num_debug);
81778
81779 /* Finally it's fully formed, ready to start executing. */
81780@@ -3311,11 +3443,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
81781 ddebug_cleanup:
81782 dynamic_debug_remove(info->debug);
81783 synchronize_sched();
81784- kfree(mod->args);
81785- free_arch_cleanup:
81786 module_arch_cleanup(mod);
81787 free_modinfo:
81788 free_modinfo(mod);
81789+ kfree(mod->args);
81790 free_unload:
81791 module_unload_free(mod);
81792 unlink_mod:
81793@@ -3398,10 +3529,16 @@ static const char *get_ksymbol(struct module *mod,
81794 unsigned long nextval;
81795
81796 /* At worse, next value is at end of module */
81797- if (within_module_init(addr, mod))
81798- nextval = (unsigned long)mod->module_init+mod->init_text_size;
81799+ if (within_module_init_rx(addr, mod))
81800+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
81801+ else if (within_module_init_rw(addr, mod))
81802+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
81803+ else if (within_module_core_rx(addr, mod))
81804+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
81805+ else if (within_module_core_rw(addr, mod))
81806+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
81807 else
81808- nextval = (unsigned long)mod->module_core+mod->core_text_size;
81809+ return NULL;
81810
81811 /* Scan for closest preceding symbol, and next symbol. (ELF
81812 starts real symbols at 1). */
81813@@ -3654,7 +3791,7 @@ static int m_show(struct seq_file *m, void *p)
81814 return 0;
81815
81816 seq_printf(m, "%s %u",
81817- mod->name, mod->init_size + mod->core_size);
81818+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
81819 print_unload_info(m, mod);
81820
81821 /* Informative for users. */
81822@@ -3663,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
81823 mod->state == MODULE_STATE_COMING ? "Loading":
81824 "Live");
81825 /* Used by oprofile and other similar tools. */
81826- seq_printf(m, " 0x%pK", mod->module_core);
81827+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
81828
81829 /* Taints info */
81830 if (mod->taints)
81831@@ -3699,7 +3836,17 @@ static const struct file_operations proc_modules_operations = {
81832
81833 static int __init proc_modules_init(void)
81834 {
81835+#ifndef CONFIG_GRKERNSEC_HIDESYM
81836+#ifdef CONFIG_GRKERNSEC_PROC_USER
81837+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
81838+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
81839+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
81840+#else
81841 proc_create("modules", 0, NULL, &proc_modules_operations);
81842+#endif
81843+#else
81844+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
81845+#endif
81846 return 0;
81847 }
81848 module_init(proc_modules_init);
81849@@ -3760,14 +3907,14 @@ struct module *__module_address(unsigned long addr)
81850 {
81851 struct module *mod;
81852
81853- if (addr < module_addr_min || addr > module_addr_max)
81854+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
81855+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
81856 return NULL;
81857
81858 list_for_each_entry_rcu(mod, &modules, list) {
81859 if (mod->state == MODULE_STATE_UNFORMED)
81860 continue;
81861- if (within_module_core(addr, mod)
81862- || within_module_init(addr, mod))
81863+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
81864 return mod;
81865 }
81866 return NULL;
81867@@ -3802,11 +3949,20 @@ bool is_module_text_address(unsigned long addr)
81868 */
81869 struct module *__module_text_address(unsigned long addr)
81870 {
81871- struct module *mod = __module_address(addr);
81872+ struct module *mod;
81873+
81874+#ifdef CONFIG_X86_32
81875+ addr = ktla_ktva(addr);
81876+#endif
81877+
81878+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
81879+ return NULL;
81880+
81881+ mod = __module_address(addr);
81882+
81883 if (mod) {
81884 /* Make sure it's within the text section. */
81885- if (!within(addr, mod->module_init, mod->init_text_size)
81886- && !within(addr, mod->module_core, mod->core_text_size))
81887+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
81888 mod = NULL;
81889 }
81890 return mod;
81891diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
81892index 7e3443f..b2a1e6b 100644
81893--- a/kernel/mutex-debug.c
81894+++ b/kernel/mutex-debug.c
81895@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
81896 }
81897
81898 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
81899- struct thread_info *ti)
81900+ struct task_struct *task)
81901 {
81902 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
81903
81904 /* Mark the current thread as blocked on the lock: */
81905- ti->task->blocked_on = waiter;
81906+ task->blocked_on = waiter;
81907 }
81908
81909 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
81910- struct thread_info *ti)
81911+ struct task_struct *task)
81912 {
81913 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
81914- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
81915- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
81916- ti->task->blocked_on = NULL;
81917+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
81918+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
81919+ task->blocked_on = NULL;
81920
81921 list_del_init(&waiter->list);
81922 waiter->task = NULL;
81923diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
81924index 0799fd3..d06ae3b 100644
81925--- a/kernel/mutex-debug.h
81926+++ b/kernel/mutex-debug.h
81927@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
81928 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
81929 extern void debug_mutex_add_waiter(struct mutex *lock,
81930 struct mutex_waiter *waiter,
81931- struct thread_info *ti);
81932+ struct task_struct *task);
81933 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
81934- struct thread_info *ti);
81935+ struct task_struct *task);
81936 extern void debug_mutex_unlock(struct mutex *lock);
81937 extern void debug_mutex_init(struct mutex *lock, const char *name,
81938 struct lock_class_key *key);
81939diff --git a/kernel/mutex.c b/kernel/mutex.c
81940index ad53a66..f1bf8bc 100644
81941--- a/kernel/mutex.c
81942+++ b/kernel/mutex.c
81943@@ -134,7 +134,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
81944 node->locked = 1;
81945 return;
81946 }
81947- ACCESS_ONCE(prev->next) = node;
81948+ ACCESS_ONCE_RW(prev->next) = node;
81949 smp_wmb();
81950 /* Wait until the lock holder passes the lock down */
81951 while (!ACCESS_ONCE(node->locked))
81952@@ -155,7 +155,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
81953 while (!(next = ACCESS_ONCE(node->next)))
81954 arch_mutex_cpu_relax();
81955 }
81956- ACCESS_ONCE(next->locked) = 1;
81957+ ACCESS_ONCE_RW(next->locked) = 1;
81958 smp_wmb();
81959 }
81960
81961@@ -341,7 +341,7 @@ slowpath:
81962 spin_lock_mutex(&lock->wait_lock, flags);
81963
81964 debug_mutex_lock_common(lock, &waiter);
81965- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
81966+ debug_mutex_add_waiter(lock, &waiter, task);
81967
81968 /* add waiting tasks to the end of the waitqueue (FIFO): */
81969 list_add_tail(&waiter.list, &lock->wait_list);
81970@@ -371,8 +371,7 @@ slowpath:
81971 * TASK_UNINTERRUPTIBLE case.)
81972 */
81973 if (unlikely(signal_pending_state(state, task))) {
81974- mutex_remove_waiter(lock, &waiter,
81975- task_thread_info(task));
81976+ mutex_remove_waiter(lock, &waiter, task);
81977 mutex_release(&lock->dep_map, 1, ip);
81978 spin_unlock_mutex(&lock->wait_lock, flags);
81979
81980@@ -391,7 +390,7 @@ slowpath:
81981 done:
81982 lock_acquired(&lock->dep_map, ip);
81983 /* got the lock - rejoice! */
81984- mutex_remove_waiter(lock, &waiter, current_thread_info());
81985+ mutex_remove_waiter(lock, &waiter, task);
81986 mutex_set_owner(lock);
81987
81988 /* set it to 0 if there are no waiters left: */
81989diff --git a/kernel/notifier.c b/kernel/notifier.c
81990index 2d5cc4c..d9ea600 100644
81991--- a/kernel/notifier.c
81992+++ b/kernel/notifier.c
81993@@ -5,6 +5,7 @@
81994 #include <linux/rcupdate.h>
81995 #include <linux/vmalloc.h>
81996 #include <linux/reboot.h>
81997+#include <linux/mm.h>
81998
81999 /*
82000 * Notifier list for kernel code which wants to be called
82001@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
82002 while ((*nl) != NULL) {
82003 if (n->priority > (*nl)->priority)
82004 break;
82005- nl = &((*nl)->next);
82006+ nl = (struct notifier_block **)&((*nl)->next);
82007 }
82008- n->next = *nl;
82009+ pax_open_kernel();
82010+ *(const void **)&n->next = *nl;
82011 rcu_assign_pointer(*nl, n);
82012+ pax_close_kernel();
82013 return 0;
82014 }
82015
82016@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
82017 return 0;
82018 if (n->priority > (*nl)->priority)
82019 break;
82020- nl = &((*nl)->next);
82021+ nl = (struct notifier_block **)&((*nl)->next);
82022 }
82023- n->next = *nl;
82024+ pax_open_kernel();
82025+ *(const void **)&n->next = *nl;
82026 rcu_assign_pointer(*nl, n);
82027+ pax_close_kernel();
82028 return 0;
82029 }
82030
82031@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
82032 {
82033 while ((*nl) != NULL) {
82034 if ((*nl) == n) {
82035+ pax_open_kernel();
82036 rcu_assign_pointer(*nl, n->next);
82037+ pax_close_kernel();
82038 return 0;
82039 }
82040- nl = &((*nl)->next);
82041+ nl = (struct notifier_block **)&((*nl)->next);
82042 }
82043 return -ENOENT;
82044 }
82045diff --git a/kernel/panic.c b/kernel/panic.c
82046index 167ec09..0dda5f9 100644
82047--- a/kernel/panic.c
82048+++ b/kernel/panic.c
82049@@ -400,7 +400,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
82050 unsigned taint, struct slowpath_args *args)
82051 {
82052 printk(KERN_WARNING "------------[ cut here ]------------\n");
82053- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
82054+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
82055
82056 if (args)
82057 vprintk(args->fmt, args->args);
82058@@ -453,7 +453,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
82059 */
82060 void __stack_chk_fail(void)
82061 {
82062- panic("stack-protector: Kernel stack is corrupted in: %p\n",
82063+ dump_stack();
82064+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
82065 __builtin_return_address(0));
82066 }
82067 EXPORT_SYMBOL(__stack_chk_fail);
82068diff --git a/kernel/pid.c b/kernel/pid.c
82069index 0db3e79..95b9dc2 100644
82070--- a/kernel/pid.c
82071+++ b/kernel/pid.c
82072@@ -33,6 +33,7 @@
82073 #include <linux/rculist.h>
82074 #include <linux/bootmem.h>
82075 #include <linux/hash.h>
82076+#include <linux/security.h>
82077 #include <linux/pid_namespace.h>
82078 #include <linux/init_task.h>
82079 #include <linux/syscalls.h>
82080@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
82081
82082 int pid_max = PID_MAX_DEFAULT;
82083
82084-#define RESERVED_PIDS 300
82085+#define RESERVED_PIDS 500
82086
82087 int pid_max_min = RESERVED_PIDS + 1;
82088 int pid_max_max = PID_MAX_LIMIT;
82089@@ -442,10 +443,18 @@ EXPORT_SYMBOL(pid_task);
82090 */
82091 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
82092 {
82093+ struct task_struct *task;
82094+
82095 rcu_lockdep_assert(rcu_read_lock_held(),
82096 "find_task_by_pid_ns() needs rcu_read_lock()"
82097 " protection");
82098- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
82099+
82100+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
82101+
82102+ if (gr_pid_is_chrooted(task))
82103+ return NULL;
82104+
82105+ return task;
82106 }
82107
82108 struct task_struct *find_task_by_vpid(pid_t vnr)
82109@@ -453,6 +462,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
82110 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
82111 }
82112
82113+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
82114+{
82115+ rcu_lockdep_assert(rcu_read_lock_held(),
82116+ "find_task_by_pid_ns() needs rcu_read_lock()"
82117+ " protection");
82118+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
82119+}
82120+
82121 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
82122 {
82123 struct pid *pid;
82124diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
82125index 6917e8e..9909aeb 100644
82126--- a/kernel/pid_namespace.c
82127+++ b/kernel/pid_namespace.c
82128@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
82129 void __user *buffer, size_t *lenp, loff_t *ppos)
82130 {
82131 struct pid_namespace *pid_ns = task_active_pid_ns(current);
82132- struct ctl_table tmp = *table;
82133+ ctl_table_no_const tmp = *table;
82134
82135 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
82136 return -EPERM;
82137diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
82138index 42670e9..8719c2f 100644
82139--- a/kernel/posix-cpu-timers.c
82140+++ b/kernel/posix-cpu-timers.c
82141@@ -1636,14 +1636,14 @@ struct k_clock clock_posix_cpu = {
82142
82143 static __init int init_posix_cpu_timers(void)
82144 {
82145- struct k_clock process = {
82146+ static struct k_clock process = {
82147 .clock_getres = process_cpu_clock_getres,
82148 .clock_get = process_cpu_clock_get,
82149 .timer_create = process_cpu_timer_create,
82150 .nsleep = process_cpu_nsleep,
82151 .nsleep_restart = process_cpu_nsleep_restart,
82152 };
82153- struct k_clock thread = {
82154+ static struct k_clock thread = {
82155 .clock_getres = thread_cpu_clock_getres,
82156 .clock_get = thread_cpu_clock_get,
82157 .timer_create = thread_cpu_timer_create,
82158diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
82159index 424c2d4..679242f 100644
82160--- a/kernel/posix-timers.c
82161+++ b/kernel/posix-timers.c
82162@@ -43,6 +43,7 @@
82163 #include <linux/hash.h>
82164 #include <linux/posix-clock.h>
82165 #include <linux/posix-timers.h>
82166+#include <linux/grsecurity.h>
82167 #include <linux/syscalls.h>
82168 #include <linux/wait.h>
82169 #include <linux/workqueue.h>
82170@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
82171 * which we beg off on and pass to do_sys_settimeofday().
82172 */
82173
82174-static struct k_clock posix_clocks[MAX_CLOCKS];
82175+static struct k_clock *posix_clocks[MAX_CLOCKS];
82176
82177 /*
82178 * These ones are defined below.
82179@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
82180 */
82181 static __init int init_posix_timers(void)
82182 {
82183- struct k_clock clock_realtime = {
82184+ static struct k_clock clock_realtime = {
82185 .clock_getres = hrtimer_get_res,
82186 .clock_get = posix_clock_realtime_get,
82187 .clock_set = posix_clock_realtime_set,
82188@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
82189 .timer_get = common_timer_get,
82190 .timer_del = common_timer_del,
82191 };
82192- struct k_clock clock_monotonic = {
82193+ static struct k_clock clock_monotonic = {
82194 .clock_getres = hrtimer_get_res,
82195 .clock_get = posix_ktime_get_ts,
82196 .nsleep = common_nsleep,
82197@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
82198 .timer_get = common_timer_get,
82199 .timer_del = common_timer_del,
82200 };
82201- struct k_clock clock_monotonic_raw = {
82202+ static struct k_clock clock_monotonic_raw = {
82203 .clock_getres = hrtimer_get_res,
82204 .clock_get = posix_get_monotonic_raw,
82205 };
82206- struct k_clock clock_realtime_coarse = {
82207+ static struct k_clock clock_realtime_coarse = {
82208 .clock_getres = posix_get_coarse_res,
82209 .clock_get = posix_get_realtime_coarse,
82210 };
82211- struct k_clock clock_monotonic_coarse = {
82212+ static struct k_clock clock_monotonic_coarse = {
82213 .clock_getres = posix_get_coarse_res,
82214 .clock_get = posix_get_monotonic_coarse,
82215 };
82216- struct k_clock clock_tai = {
82217+ static struct k_clock clock_tai = {
82218 .clock_getres = hrtimer_get_res,
82219 .clock_get = posix_get_tai,
82220 .nsleep = common_nsleep,
82221@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
82222 .timer_get = common_timer_get,
82223 .timer_del = common_timer_del,
82224 };
82225- struct k_clock clock_boottime = {
82226+ static struct k_clock clock_boottime = {
82227 .clock_getres = hrtimer_get_res,
82228 .clock_get = posix_get_boottime,
82229 .nsleep = common_nsleep,
82230@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
82231 return;
82232 }
82233
82234- posix_clocks[clock_id] = *new_clock;
82235+ posix_clocks[clock_id] = new_clock;
82236 }
82237 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
82238
82239@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
82240 return (id & CLOCKFD_MASK) == CLOCKFD ?
82241 &clock_posix_dynamic : &clock_posix_cpu;
82242
82243- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
82244+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
82245 return NULL;
82246- return &posix_clocks[id];
82247+ return posix_clocks[id];
82248 }
82249
82250 static int common_timer_create(struct k_itimer *new_timer)
82251@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
82252 struct k_clock *kc = clockid_to_kclock(which_clock);
82253 struct k_itimer *new_timer;
82254 int error, new_timer_id;
82255- sigevent_t event;
82256+ sigevent_t event = { };
82257 int it_id_set = IT_ID_NOT_SET;
82258
82259 if (!kc)
82260@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
82261 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
82262 return -EFAULT;
82263
82264+ /* only the CLOCK_REALTIME clock can be set, all other clocks
82265+ have their clock_set fptr set to a nosettime dummy function
82266+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
82267+ call common_clock_set, which calls do_sys_settimeofday, which
82268+ we hook
82269+ */
82270+
82271 return kc->clock_set(which_clock, &new_tp);
82272 }
82273
82274diff --git a/kernel/power/process.c b/kernel/power/process.c
82275index 98088e0..aaf95c0 100644
82276--- a/kernel/power/process.c
82277+++ b/kernel/power/process.c
82278@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
82279 u64 elapsed_csecs64;
82280 unsigned int elapsed_csecs;
82281 bool wakeup = false;
82282+ bool timedout = false;
82283
82284 do_gettimeofday(&start);
82285
82286@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
82287
82288 while (true) {
82289 todo = 0;
82290+ if (time_after(jiffies, end_time))
82291+ timedout = true;
82292 read_lock(&tasklist_lock);
82293 do_each_thread(g, p) {
82294 if (p == current || !freeze_task(p))
82295 continue;
82296
82297- if (!freezer_should_skip(p))
82298+ if (!freezer_should_skip(p)) {
82299 todo++;
82300+ if (timedout) {
82301+ printk(KERN_ERR "Task refusing to freeze:\n");
82302+ sched_show_task(p);
82303+ }
82304+ }
82305 } while_each_thread(g, p);
82306 read_unlock(&tasklist_lock);
82307
82308@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
82309 todo += wq_busy;
82310 }
82311
82312- if (!todo || time_after(jiffies, end_time))
82313+ if (!todo || timedout)
82314 break;
82315
82316 if (pm_wakeup_pending()) {
82317diff --git a/kernel/printk.c b/kernel/printk.c
82318index d37d45c..ab918b3 100644
82319--- a/kernel/printk.c
82320+++ b/kernel/printk.c
82321@@ -390,6 +390,11 @@ static int check_syslog_permissions(int type, bool from_file)
82322 if (from_file && type != SYSLOG_ACTION_OPEN)
82323 return 0;
82324
82325+#ifdef CONFIG_GRKERNSEC_DMESG
82326+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
82327+ return -EPERM;
82328+#endif
82329+
82330 if (syslog_action_restricted(type)) {
82331 if (capable(CAP_SYSLOG))
82332 return 0;
82333diff --git a/kernel/profile.c b/kernel/profile.c
82334index 0bf4007..6234708 100644
82335--- a/kernel/profile.c
82336+++ b/kernel/profile.c
82337@@ -37,7 +37,7 @@ struct profile_hit {
82338 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
82339 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
82340
82341-static atomic_t *prof_buffer;
82342+static atomic_unchecked_t *prof_buffer;
82343 static unsigned long prof_len, prof_shift;
82344
82345 int prof_on __read_mostly;
82346@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
82347 hits[i].pc = 0;
82348 continue;
82349 }
82350- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
82351+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
82352 hits[i].hits = hits[i].pc = 0;
82353 }
82354 }
82355@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
82356 * Add the current hit(s) and flush the write-queue out
82357 * to the global buffer:
82358 */
82359- atomic_add(nr_hits, &prof_buffer[pc]);
82360+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
82361 for (i = 0; i < NR_PROFILE_HIT; ++i) {
82362- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
82363+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
82364 hits[i].pc = hits[i].hits = 0;
82365 }
82366 out:
82367@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
82368 {
82369 unsigned long pc;
82370 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
82371- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
82372+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
82373 }
82374 #endif /* !CONFIG_SMP */
82375
82376@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
82377 return -EFAULT;
82378 buf++; p++; count--; read++;
82379 }
82380- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
82381+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
82382 if (copy_to_user(buf, (void *)pnt, count))
82383 return -EFAULT;
82384 read += count;
82385@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
82386 }
82387 #endif
82388 profile_discard_flip_buffers();
82389- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
82390+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
82391 return count;
82392 }
82393
82394diff --git a/kernel/ptrace.c b/kernel/ptrace.c
82395index 335a7ae..3bbbceb 100644
82396--- a/kernel/ptrace.c
82397+++ b/kernel/ptrace.c
82398@@ -326,7 +326,7 @@ static int ptrace_attach(struct task_struct *task, long request,
82399 if (seize)
82400 flags |= PT_SEIZED;
82401 rcu_read_lock();
82402- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
82403+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
82404 flags |= PT_PTRACE_CAP;
82405 rcu_read_unlock();
82406 task->ptrace = flags;
82407@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
82408 break;
82409 return -EIO;
82410 }
82411- if (copy_to_user(dst, buf, retval))
82412+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
82413 return -EFAULT;
82414 copied += retval;
82415 src += retval;
82416@@ -805,7 +805,7 @@ int ptrace_request(struct task_struct *child, long request,
82417 bool seized = child->ptrace & PT_SEIZED;
82418 int ret = -EIO;
82419 siginfo_t siginfo, *si;
82420- void __user *datavp = (void __user *) data;
82421+ void __user *datavp = (__force void __user *) data;
82422 unsigned long __user *datalp = datavp;
82423 unsigned long flags;
82424
82425@@ -1011,14 +1011,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
82426 goto out;
82427 }
82428
82429+ if (gr_handle_ptrace(child, request)) {
82430+ ret = -EPERM;
82431+ goto out_put_task_struct;
82432+ }
82433+
82434 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
82435 ret = ptrace_attach(child, request, addr, data);
82436 /*
82437 * Some architectures need to do book-keeping after
82438 * a ptrace attach.
82439 */
82440- if (!ret)
82441+ if (!ret) {
82442 arch_ptrace_attach(child);
82443+ gr_audit_ptrace(child);
82444+ }
82445 goto out_put_task_struct;
82446 }
82447
82448@@ -1046,7 +1053,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
82449 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
82450 if (copied != sizeof(tmp))
82451 return -EIO;
82452- return put_user(tmp, (unsigned long __user *)data);
82453+ return put_user(tmp, (__force unsigned long __user *)data);
82454 }
82455
82456 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
82457@@ -1140,7 +1147,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
82458 }
82459
82460 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
82461- compat_long_t addr, compat_long_t data)
82462+ compat_ulong_t addr, compat_ulong_t data)
82463 {
82464 struct task_struct *child;
82465 long ret;
82466@@ -1156,14 +1163,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
82467 goto out;
82468 }
82469
82470+ if (gr_handle_ptrace(child, request)) {
82471+ ret = -EPERM;
82472+ goto out_put_task_struct;
82473+ }
82474+
82475 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
82476 ret = ptrace_attach(child, request, addr, data);
82477 /*
82478 * Some architectures need to do book-keeping after
82479 * a ptrace attach.
82480 */
82481- if (!ret)
82482+ if (!ret) {
82483 arch_ptrace_attach(child);
82484+ gr_audit_ptrace(child);
82485+ }
82486 goto out_put_task_struct;
82487 }
82488
82489diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
82490index 48ab703..07561d4 100644
82491--- a/kernel/rcupdate.c
82492+++ b/kernel/rcupdate.c
82493@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
82494 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
82495 */
82496 if (till_stall_check < 3) {
82497- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
82498+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
82499 till_stall_check = 3;
82500 } else if (till_stall_check > 300) {
82501- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
82502+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
82503 till_stall_check = 300;
82504 }
82505 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
82506diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
82507index a0714a5..2ab5e34 100644
82508--- a/kernel/rcutiny.c
82509+++ b/kernel/rcutiny.c
82510@@ -46,7 +46,7 @@
82511 struct rcu_ctrlblk;
82512 static void invoke_rcu_callbacks(void);
82513 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
82514-static void rcu_process_callbacks(struct softirq_action *unused);
82515+static void rcu_process_callbacks(void);
82516 static void __call_rcu(struct rcu_head *head,
82517 void (*func)(struct rcu_head *rcu),
82518 struct rcu_ctrlblk *rcp);
82519@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
82520 rcu_is_callbacks_kthread()));
82521 }
82522
82523-static void rcu_process_callbacks(struct softirq_action *unused)
82524+static void rcu_process_callbacks(void)
82525 {
82526 __rcu_process_callbacks(&rcu_sched_ctrlblk);
82527 __rcu_process_callbacks(&rcu_bh_ctrlblk);
82528diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
82529index 8a23300..4255818 100644
82530--- a/kernel/rcutiny_plugin.h
82531+++ b/kernel/rcutiny_plugin.h
82532@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
82533 have_rcu_kthread_work = morework;
82534 local_irq_restore(flags);
82535 if (work)
82536- rcu_process_callbacks(NULL);
82537+ rcu_process_callbacks();
82538 schedule_timeout_interruptible(1); /* Leave CPU for others. */
82539 }
82540
82541diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
82542index e1f3a8c..42c94a2 100644
82543--- a/kernel/rcutorture.c
82544+++ b/kernel/rcutorture.c
82545@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
82546 { 0 };
82547 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
82548 { 0 };
82549-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
82550-static atomic_t n_rcu_torture_alloc;
82551-static atomic_t n_rcu_torture_alloc_fail;
82552-static atomic_t n_rcu_torture_free;
82553-static atomic_t n_rcu_torture_mberror;
82554-static atomic_t n_rcu_torture_error;
82555+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
82556+static atomic_unchecked_t n_rcu_torture_alloc;
82557+static atomic_unchecked_t n_rcu_torture_alloc_fail;
82558+static atomic_unchecked_t n_rcu_torture_free;
82559+static atomic_unchecked_t n_rcu_torture_mberror;
82560+static atomic_unchecked_t n_rcu_torture_error;
82561 static long n_rcu_torture_barrier_error;
82562 static long n_rcu_torture_boost_ktrerror;
82563 static long n_rcu_torture_boost_rterror;
82564@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
82565
82566 spin_lock_bh(&rcu_torture_lock);
82567 if (list_empty(&rcu_torture_freelist)) {
82568- atomic_inc(&n_rcu_torture_alloc_fail);
82569+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
82570 spin_unlock_bh(&rcu_torture_lock);
82571 return NULL;
82572 }
82573- atomic_inc(&n_rcu_torture_alloc);
82574+ atomic_inc_unchecked(&n_rcu_torture_alloc);
82575 p = rcu_torture_freelist.next;
82576 list_del_init(p);
82577 spin_unlock_bh(&rcu_torture_lock);
82578@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
82579 static void
82580 rcu_torture_free(struct rcu_torture *p)
82581 {
82582- atomic_inc(&n_rcu_torture_free);
82583+ atomic_inc_unchecked(&n_rcu_torture_free);
82584 spin_lock_bh(&rcu_torture_lock);
82585 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
82586 spin_unlock_bh(&rcu_torture_lock);
82587@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
82588 i = rp->rtort_pipe_count;
82589 if (i > RCU_TORTURE_PIPE_LEN)
82590 i = RCU_TORTURE_PIPE_LEN;
82591- atomic_inc(&rcu_torture_wcount[i]);
82592+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82593 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
82594 rp->rtort_mbtest = 0;
82595 rcu_torture_free(rp);
82596@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
82597 i = rp->rtort_pipe_count;
82598 if (i > RCU_TORTURE_PIPE_LEN)
82599 i = RCU_TORTURE_PIPE_LEN;
82600- atomic_inc(&rcu_torture_wcount[i]);
82601+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82602 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
82603 rp->rtort_mbtest = 0;
82604 list_del(&rp->rtort_free);
82605@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
82606 i = old_rp->rtort_pipe_count;
82607 if (i > RCU_TORTURE_PIPE_LEN)
82608 i = RCU_TORTURE_PIPE_LEN;
82609- atomic_inc(&rcu_torture_wcount[i]);
82610+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82611 old_rp->rtort_pipe_count++;
82612 cur_ops->deferred_free(old_rp);
82613 }
82614@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
82615 return;
82616 }
82617 if (p->rtort_mbtest == 0)
82618- atomic_inc(&n_rcu_torture_mberror);
82619+ atomic_inc_unchecked(&n_rcu_torture_mberror);
82620 spin_lock(&rand_lock);
82621 cur_ops->read_delay(&rand);
82622 n_rcu_torture_timers++;
82623@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
82624 continue;
82625 }
82626 if (p->rtort_mbtest == 0)
82627- atomic_inc(&n_rcu_torture_mberror);
82628+ atomic_inc_unchecked(&n_rcu_torture_mberror);
82629 cur_ops->read_delay(&rand);
82630 preempt_disable();
82631 pipe_count = p->rtort_pipe_count;
82632@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
82633 rcu_torture_current,
82634 rcu_torture_current_version,
82635 list_empty(&rcu_torture_freelist),
82636- atomic_read(&n_rcu_torture_alloc),
82637- atomic_read(&n_rcu_torture_alloc_fail),
82638- atomic_read(&n_rcu_torture_free));
82639+ atomic_read_unchecked(&n_rcu_torture_alloc),
82640+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
82641+ atomic_read_unchecked(&n_rcu_torture_free));
82642 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
82643- atomic_read(&n_rcu_torture_mberror),
82644+ atomic_read_unchecked(&n_rcu_torture_mberror),
82645 n_rcu_torture_boost_ktrerror,
82646 n_rcu_torture_boost_rterror);
82647 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
82648@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
82649 n_barrier_attempts,
82650 n_rcu_torture_barrier_error);
82651 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
82652- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
82653+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
82654 n_rcu_torture_barrier_error != 0 ||
82655 n_rcu_torture_boost_ktrerror != 0 ||
82656 n_rcu_torture_boost_rterror != 0 ||
82657 n_rcu_torture_boost_failure != 0 ||
82658 i > 1) {
82659 cnt += sprintf(&page[cnt], "!!! ");
82660- atomic_inc(&n_rcu_torture_error);
82661+ atomic_inc_unchecked(&n_rcu_torture_error);
82662 WARN_ON_ONCE(1);
82663 }
82664 cnt += sprintf(&page[cnt], "Reader Pipe: ");
82665@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
82666 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
82667 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
82668 cnt += sprintf(&page[cnt], " %d",
82669- atomic_read(&rcu_torture_wcount[i]));
82670+ atomic_read_unchecked(&rcu_torture_wcount[i]));
82671 }
82672 cnt += sprintf(&page[cnt], "\n");
82673 if (cur_ops->stats)
82674@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
82675
82676 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
82677
82678- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
82679+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
82680 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
82681 else if (n_online_successes != n_online_attempts ||
82682 n_offline_successes != n_offline_attempts)
82683@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
82684
82685 rcu_torture_current = NULL;
82686 rcu_torture_current_version = 0;
82687- atomic_set(&n_rcu_torture_alloc, 0);
82688- atomic_set(&n_rcu_torture_alloc_fail, 0);
82689- atomic_set(&n_rcu_torture_free, 0);
82690- atomic_set(&n_rcu_torture_mberror, 0);
82691- atomic_set(&n_rcu_torture_error, 0);
82692+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
82693+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
82694+ atomic_set_unchecked(&n_rcu_torture_free, 0);
82695+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
82696+ atomic_set_unchecked(&n_rcu_torture_error, 0);
82697 n_rcu_torture_barrier_error = 0;
82698 n_rcu_torture_boost_ktrerror = 0;
82699 n_rcu_torture_boost_rterror = 0;
82700 n_rcu_torture_boost_failure = 0;
82701 n_rcu_torture_boosts = 0;
82702 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
82703- atomic_set(&rcu_torture_wcount[i], 0);
82704+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
82705 for_each_possible_cpu(cpu) {
82706 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
82707 per_cpu(rcu_torture_count, cpu)[i] = 0;
82708diff --git a/kernel/rcutree.c b/kernel/rcutree.c
82709index 3538001..e379e0b 100644
82710--- a/kernel/rcutree.c
82711+++ b/kernel/rcutree.c
82712@@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
82713 rcu_prepare_for_idle(smp_processor_id());
82714 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
82715 smp_mb__before_atomic_inc(); /* See above. */
82716- atomic_inc(&rdtp->dynticks);
82717+ atomic_inc_unchecked(&rdtp->dynticks);
82718 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
82719- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
82720+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
82721
82722 /*
82723 * It is illegal to enter an extended quiescent state while
82724@@ -496,10 +496,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
82725 int user)
82726 {
82727 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
82728- atomic_inc(&rdtp->dynticks);
82729+ atomic_inc_unchecked(&rdtp->dynticks);
82730 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
82731 smp_mb__after_atomic_inc(); /* See above. */
82732- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
82733+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
82734 rcu_cleanup_after_idle(smp_processor_id());
82735 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
82736 if (!user && !is_idle_task(current)) {
82737@@ -638,14 +638,14 @@ void rcu_nmi_enter(void)
82738 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
82739
82740 if (rdtp->dynticks_nmi_nesting == 0 &&
82741- (atomic_read(&rdtp->dynticks) & 0x1))
82742+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
82743 return;
82744 rdtp->dynticks_nmi_nesting++;
82745 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
82746- atomic_inc(&rdtp->dynticks);
82747+ atomic_inc_unchecked(&rdtp->dynticks);
82748 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
82749 smp_mb__after_atomic_inc(); /* See above. */
82750- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
82751+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
82752 }
82753
82754 /**
82755@@ -664,9 +664,9 @@ void rcu_nmi_exit(void)
82756 return;
82757 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
82758 smp_mb__before_atomic_inc(); /* See above. */
82759- atomic_inc(&rdtp->dynticks);
82760+ atomic_inc_unchecked(&rdtp->dynticks);
82761 smp_mb__after_atomic_inc(); /* Force delay to next write. */
82762- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
82763+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
82764 }
82765
82766 /**
82767@@ -680,7 +680,7 @@ int rcu_is_cpu_idle(void)
82768 int ret;
82769
82770 preempt_disable();
82771- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
82772+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
82773 preempt_enable();
82774 return ret;
82775 }
82776@@ -748,7 +748,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
82777 */
82778 static int dyntick_save_progress_counter(struct rcu_data *rdp)
82779 {
82780- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
82781+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
82782 return (rdp->dynticks_snap & 0x1) == 0;
82783 }
82784
82785@@ -763,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
82786 unsigned int curr;
82787 unsigned int snap;
82788
82789- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
82790+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
82791 snap = (unsigned int)rdp->dynticks_snap;
82792
82793 /*
82794@@ -1440,9 +1440,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
82795 rdp = this_cpu_ptr(rsp->rda);
82796 rcu_preempt_check_blocked_tasks(rnp);
82797 rnp->qsmask = rnp->qsmaskinit;
82798- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
82799+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
82800 WARN_ON_ONCE(rnp->completed != rsp->completed);
82801- ACCESS_ONCE(rnp->completed) = rsp->completed;
82802+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
82803 if (rnp == rdp->mynode)
82804 rcu_start_gp_per_cpu(rsp, rnp, rdp);
82805 rcu_preempt_boost_start_gp(rnp);
82806@@ -1524,7 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
82807 */
82808 rcu_for_each_node_breadth_first(rsp, rnp) {
82809 raw_spin_lock_irq(&rnp->lock);
82810- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
82811+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
82812 rdp = this_cpu_ptr(rsp->rda);
82813 if (rnp == rdp->mynode)
82814 __rcu_process_gp_end(rsp, rnp, rdp);
82815@@ -1855,7 +1855,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
82816 rsp->qlen += rdp->qlen;
82817 rdp->n_cbs_orphaned += rdp->qlen;
82818 rdp->qlen_lazy = 0;
82819- ACCESS_ONCE(rdp->qlen) = 0;
82820+ ACCESS_ONCE_RW(rdp->qlen) = 0;
82821 }
82822
82823 /*
82824@@ -2101,7 +2101,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
82825 }
82826 smp_mb(); /* List handling before counting for rcu_barrier(). */
82827 rdp->qlen_lazy -= count_lazy;
82828- ACCESS_ONCE(rdp->qlen) -= count;
82829+ ACCESS_ONCE_RW(rdp->qlen) -= count;
82830 rdp->n_cbs_invoked += count;
82831
82832 /* Reinstate batch limit if we have worked down the excess. */
82833@@ -2295,7 +2295,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
82834 /*
82835 * Do RCU core processing for the current CPU.
82836 */
82837-static void rcu_process_callbacks(struct softirq_action *unused)
82838+static void rcu_process_callbacks(void)
82839 {
82840 struct rcu_state *rsp;
82841
82842@@ -2419,7 +2419,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
82843 local_irq_restore(flags);
82844 return;
82845 }
82846- ACCESS_ONCE(rdp->qlen)++;
82847+ ACCESS_ONCE_RW(rdp->qlen)++;
82848 if (lazy)
82849 rdp->qlen_lazy++;
82850 else
82851@@ -2628,11 +2628,11 @@ void synchronize_sched_expedited(void)
82852 * counter wrap on a 32-bit system. Quite a few more CPUs would of
82853 * course be required on a 64-bit system.
82854 */
82855- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
82856+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
82857 (ulong)atomic_long_read(&rsp->expedited_done) +
82858 ULONG_MAX / 8)) {
82859 synchronize_sched();
82860- atomic_long_inc(&rsp->expedited_wrap);
82861+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
82862 return;
82863 }
82864
82865@@ -2640,7 +2640,7 @@ void synchronize_sched_expedited(void)
82866 * Take a ticket. Note that atomic_inc_return() implies a
82867 * full memory barrier.
82868 */
82869- snap = atomic_long_inc_return(&rsp->expedited_start);
82870+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
82871 firstsnap = snap;
82872 get_online_cpus();
82873 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
82874@@ -2653,14 +2653,14 @@ void synchronize_sched_expedited(void)
82875 synchronize_sched_expedited_cpu_stop,
82876 NULL) == -EAGAIN) {
82877 put_online_cpus();
82878- atomic_long_inc(&rsp->expedited_tryfail);
82879+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
82880
82881 /* Check to see if someone else did our work for us. */
82882 s = atomic_long_read(&rsp->expedited_done);
82883 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
82884 /* ensure test happens before caller kfree */
82885 smp_mb__before_atomic_inc(); /* ^^^ */
82886- atomic_long_inc(&rsp->expedited_workdone1);
82887+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
82888 return;
82889 }
82890
82891@@ -2669,7 +2669,7 @@ void synchronize_sched_expedited(void)
82892 udelay(trycount * num_online_cpus());
82893 } else {
82894 wait_rcu_gp(call_rcu_sched);
82895- atomic_long_inc(&rsp->expedited_normal);
82896+ atomic_long_inc_unchecked(&rsp->expedited_normal);
82897 return;
82898 }
82899
82900@@ -2678,7 +2678,7 @@ void synchronize_sched_expedited(void)
82901 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
82902 /* ensure test happens before caller kfree */
82903 smp_mb__before_atomic_inc(); /* ^^^ */
82904- atomic_long_inc(&rsp->expedited_workdone2);
82905+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
82906 return;
82907 }
82908
82909@@ -2690,10 +2690,10 @@ void synchronize_sched_expedited(void)
82910 * period works for us.
82911 */
82912 get_online_cpus();
82913- snap = atomic_long_read(&rsp->expedited_start);
82914+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
82915 smp_mb(); /* ensure read is before try_stop_cpus(). */
82916 }
82917- atomic_long_inc(&rsp->expedited_stoppedcpus);
82918+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
82919
82920 /*
82921 * Everyone up to our most recent fetch is covered by our grace
82922@@ -2702,16 +2702,16 @@ void synchronize_sched_expedited(void)
82923 * than we did already did their update.
82924 */
82925 do {
82926- atomic_long_inc(&rsp->expedited_done_tries);
82927+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
82928 s = atomic_long_read(&rsp->expedited_done);
82929 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
82930 /* ensure test happens before caller kfree */
82931 smp_mb__before_atomic_inc(); /* ^^^ */
82932- atomic_long_inc(&rsp->expedited_done_lost);
82933+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
82934 break;
82935 }
82936 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
82937- atomic_long_inc(&rsp->expedited_done_exit);
82938+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
82939
82940 put_online_cpus();
82941 }
82942@@ -2893,7 +2893,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
82943 * ACCESS_ONCE() to prevent the compiler from speculating
82944 * the increment to precede the early-exit check.
82945 */
82946- ACCESS_ONCE(rsp->n_barrier_done)++;
82947+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
82948 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
82949 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
82950 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
82951@@ -2943,7 +2943,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
82952
82953 /* Increment ->n_barrier_done to prevent duplicate work. */
82954 smp_mb(); /* Keep increment after above mechanism. */
82955- ACCESS_ONCE(rsp->n_barrier_done)++;
82956+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
82957 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
82958 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
82959 smp_mb(); /* Keep increment before caller's subsequent code. */
82960@@ -2988,10 +2988,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
82961 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
82962 init_callback_list(rdp);
82963 rdp->qlen_lazy = 0;
82964- ACCESS_ONCE(rdp->qlen) = 0;
82965+ ACCESS_ONCE_RW(rdp->qlen) = 0;
82966 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
82967 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
82968- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
82969+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
82970 rdp->cpu = cpu;
82971 rdp->rsp = rsp;
82972 rcu_boot_init_nocb_percpu_data(rdp);
82973@@ -3024,8 +3024,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
82974 rdp->blimit = blimit;
82975 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
82976 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
82977- atomic_set(&rdp->dynticks->dynticks,
82978- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
82979+ atomic_set_unchecked(&rdp->dynticks->dynticks,
82980+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
82981 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
82982
82983 /* Add CPU to rcu_node bitmasks. */
82984@@ -3120,7 +3120,7 @@ static int __init rcu_spawn_gp_kthread(void)
82985 struct task_struct *t;
82986
82987 for_each_rcu_flavor(rsp) {
82988- t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
82989+ t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
82990 BUG_ON(IS_ERR(t));
82991 rnp = rcu_get_root(rsp);
82992 raw_spin_lock_irqsave(&rnp->lock, flags);
82993diff --git a/kernel/rcutree.h b/kernel/rcutree.h
82994index 4df5034..5ee93f2 100644
82995--- a/kernel/rcutree.h
82996+++ b/kernel/rcutree.h
82997@@ -87,7 +87,7 @@ struct rcu_dynticks {
82998 long long dynticks_nesting; /* Track irq/process nesting level. */
82999 /* Process level is worth LLONG_MAX/2. */
83000 int dynticks_nmi_nesting; /* Track NMI nesting level. */
83001- atomic_t dynticks; /* Even value for idle, else odd. */
83002+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
83003 #ifdef CONFIG_RCU_FAST_NO_HZ
83004 bool all_lazy; /* Are all CPU's CBs lazy? */
83005 unsigned long nonlazy_posted;
83006@@ -414,17 +414,17 @@ struct rcu_state {
83007 /* _rcu_barrier(). */
83008 /* End of fields guarded by barrier_mutex. */
83009
83010- atomic_long_t expedited_start; /* Starting ticket. */
83011- atomic_long_t expedited_done; /* Done ticket. */
83012- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
83013- atomic_long_t expedited_tryfail; /* # acquisition failures. */
83014- atomic_long_t expedited_workdone1; /* # done by others #1. */
83015- atomic_long_t expedited_workdone2; /* # done by others #2. */
83016- atomic_long_t expedited_normal; /* # fallbacks to normal. */
83017- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
83018- atomic_long_t expedited_done_tries; /* # tries to update _done. */
83019- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
83020- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
83021+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
83022+ atomic_long_t expedited_done; /* Done ticket. */
83023+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
83024+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
83025+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
83026+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
83027+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
83028+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
83029+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
83030+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
83031+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
83032
83033 unsigned long jiffies_force_qs; /* Time at which to invoke */
83034 /* force_quiescent_state(). */
83035diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
83036index 3db5a37..b395fb35 100644
83037--- a/kernel/rcutree_plugin.h
83038+++ b/kernel/rcutree_plugin.h
83039@@ -903,7 +903,7 @@ void synchronize_rcu_expedited(void)
83040
83041 /* Clean up and exit. */
83042 smp_mb(); /* ensure expedited GP seen before counter increment. */
83043- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
83044+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
83045 unlock_mb_ret:
83046 mutex_unlock(&sync_rcu_preempt_exp_mutex);
83047 mb_ret:
83048@@ -1451,7 +1451,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
83049 free_cpumask_var(cm);
83050 }
83051
83052-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
83053+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
83054 .store = &rcu_cpu_kthread_task,
83055 .thread_should_run = rcu_cpu_kthread_should_run,
83056 .thread_fn = rcu_cpu_kthread,
83057@@ -1916,7 +1916,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
83058 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
83059 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
83060 cpu, ticks_value, ticks_title,
83061- atomic_read(&rdtp->dynticks) & 0xfff,
83062+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
83063 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
83064 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
83065 fast_no_hz);
83066@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
83067
83068 /* Enqueue the callback on the nocb list and update counts. */
83069 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
83070- ACCESS_ONCE(*old_rhpp) = rhp;
83071+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
83072 atomic_long_add(rhcount, &rdp->nocb_q_count);
83073 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
83074
83075@@ -2219,12 +2219,12 @@ static int rcu_nocb_kthread(void *arg)
83076 * Extract queued callbacks, update counts, and wait
83077 * for a grace period to elapse.
83078 */
83079- ACCESS_ONCE(rdp->nocb_head) = NULL;
83080+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
83081 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
83082 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
83083 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
83084- ACCESS_ONCE(rdp->nocb_p_count) += c;
83085- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
83086+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
83087+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
83088 rcu_nocb_wait_gp(rdp);
83089
83090 /* Each pass through the following loop invokes a callback. */
83091@@ -2246,8 +2246,8 @@ static int rcu_nocb_kthread(void *arg)
83092 list = next;
83093 }
83094 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
83095- ACCESS_ONCE(rdp->nocb_p_count) -= c;
83096- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
83097+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
83098+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
83099 rdp->n_nocbs_invoked += c;
83100 }
83101 return 0;
83102@@ -2274,7 +2274,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
83103 t = kthread_run(rcu_nocb_kthread, rdp,
83104 "rcuo%c/%d", rsp->abbr, cpu);
83105 BUG_ON(IS_ERR(t));
83106- ACCESS_ONCE(rdp->nocb_kthread) = t;
83107+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
83108 }
83109 }
83110
83111diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
83112index cf6c174..a8f4b50 100644
83113--- a/kernel/rcutree_trace.c
83114+++ b/kernel/rcutree_trace.c
83115@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
83116 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
83117 rdp->passed_quiesce, rdp->qs_pending);
83118 seq_printf(m, " dt=%d/%llx/%d df=%lu",
83119- atomic_read(&rdp->dynticks->dynticks),
83120+ atomic_read_unchecked(&rdp->dynticks->dynticks),
83121 rdp->dynticks->dynticks_nesting,
83122 rdp->dynticks->dynticks_nmi_nesting,
83123 rdp->dynticks_fqs);
83124@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
83125 struct rcu_state *rsp = (struct rcu_state *)m->private;
83126
83127 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
83128- atomic_long_read(&rsp->expedited_start),
83129+ atomic_long_read_unchecked(&rsp->expedited_start),
83130 atomic_long_read(&rsp->expedited_done),
83131- atomic_long_read(&rsp->expedited_wrap),
83132- atomic_long_read(&rsp->expedited_tryfail),
83133- atomic_long_read(&rsp->expedited_workdone1),
83134- atomic_long_read(&rsp->expedited_workdone2),
83135- atomic_long_read(&rsp->expedited_normal),
83136- atomic_long_read(&rsp->expedited_stoppedcpus),
83137- atomic_long_read(&rsp->expedited_done_tries),
83138- atomic_long_read(&rsp->expedited_done_lost),
83139- atomic_long_read(&rsp->expedited_done_exit));
83140+ atomic_long_read_unchecked(&rsp->expedited_wrap),
83141+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
83142+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
83143+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
83144+ atomic_long_read_unchecked(&rsp->expedited_normal),
83145+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
83146+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
83147+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
83148+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
83149 return 0;
83150 }
83151
83152diff --git a/kernel/resource.c b/kernel/resource.c
83153index d738698..5f8e60a 100644
83154--- a/kernel/resource.c
83155+++ b/kernel/resource.c
83156@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
83157
83158 static int __init ioresources_init(void)
83159 {
83160+#ifdef CONFIG_GRKERNSEC_PROC_ADD
83161+#ifdef CONFIG_GRKERNSEC_PROC_USER
83162+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
83163+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
83164+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83165+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
83166+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
83167+#endif
83168+#else
83169 proc_create("ioports", 0, NULL, &proc_ioports_operations);
83170 proc_create("iomem", 0, NULL, &proc_iomem_operations);
83171+#endif
83172 return 0;
83173 }
83174 __initcall(ioresources_init);
83175diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
83176index 1d96dd0..994ff19 100644
83177--- a/kernel/rtmutex-tester.c
83178+++ b/kernel/rtmutex-tester.c
83179@@ -22,7 +22,7 @@
83180 #define MAX_RT_TEST_MUTEXES 8
83181
83182 static spinlock_t rttest_lock;
83183-static atomic_t rttest_event;
83184+static atomic_unchecked_t rttest_event;
83185
83186 struct test_thread_data {
83187 int opcode;
83188@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83189
83190 case RTTEST_LOCKCONT:
83191 td->mutexes[td->opdata] = 1;
83192- td->event = atomic_add_return(1, &rttest_event);
83193+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83194 return 0;
83195
83196 case RTTEST_RESET:
83197@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83198 return 0;
83199
83200 case RTTEST_RESETEVENT:
83201- atomic_set(&rttest_event, 0);
83202+ atomic_set_unchecked(&rttest_event, 0);
83203 return 0;
83204
83205 default:
83206@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83207 return ret;
83208
83209 td->mutexes[id] = 1;
83210- td->event = atomic_add_return(1, &rttest_event);
83211+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83212 rt_mutex_lock(&mutexes[id]);
83213- td->event = atomic_add_return(1, &rttest_event);
83214+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83215 td->mutexes[id] = 4;
83216 return 0;
83217
83218@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83219 return ret;
83220
83221 td->mutexes[id] = 1;
83222- td->event = atomic_add_return(1, &rttest_event);
83223+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83224 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
83225- td->event = atomic_add_return(1, &rttest_event);
83226+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83227 td->mutexes[id] = ret ? 0 : 4;
83228 return ret ? -EINTR : 0;
83229
83230@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83231 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
83232 return ret;
83233
83234- td->event = atomic_add_return(1, &rttest_event);
83235+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83236 rt_mutex_unlock(&mutexes[id]);
83237- td->event = atomic_add_return(1, &rttest_event);
83238+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83239 td->mutexes[id] = 0;
83240 return 0;
83241
83242@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83243 break;
83244
83245 td->mutexes[dat] = 2;
83246- td->event = atomic_add_return(1, &rttest_event);
83247+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83248 break;
83249
83250 default:
83251@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83252 return;
83253
83254 td->mutexes[dat] = 3;
83255- td->event = atomic_add_return(1, &rttest_event);
83256+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83257 break;
83258
83259 case RTTEST_LOCKNOWAIT:
83260@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83261 return;
83262
83263 td->mutexes[dat] = 1;
83264- td->event = atomic_add_return(1, &rttest_event);
83265+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83266 return;
83267
83268 default:
83269diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
83270index 64de5f8..7735e12 100644
83271--- a/kernel/sched/auto_group.c
83272+++ b/kernel/sched/auto_group.c
83273@@ -11,7 +11,7 @@
83274
83275 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
83276 static struct autogroup autogroup_default;
83277-static atomic_t autogroup_seq_nr;
83278+static atomic_unchecked_t autogroup_seq_nr;
83279
83280 void __init autogroup_init(struct task_struct *init_task)
83281 {
83282@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
83283
83284 kref_init(&ag->kref);
83285 init_rwsem(&ag->lock);
83286- ag->id = atomic_inc_return(&autogroup_seq_nr);
83287+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
83288 ag->tg = tg;
83289 #ifdef CONFIG_RT_GROUP_SCHED
83290 /*
83291diff --git a/kernel/sched/core.c b/kernel/sched/core.c
83292index e8b3350..d83d44e 100644
83293--- a/kernel/sched/core.c
83294+++ b/kernel/sched/core.c
83295@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
83296 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
83297 * positive (at least 1, or number of jiffies left till timeout) if completed.
83298 */
83299-long __sched
83300+long __sched __intentional_overflow(-1)
83301 wait_for_completion_interruptible_timeout(struct completion *x,
83302 unsigned long timeout)
83303 {
83304@@ -3457,7 +3457,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
83305 *
83306 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
83307 */
83308-int __sched wait_for_completion_killable(struct completion *x)
83309+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
83310 {
83311 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
83312 if (t == -ERESTARTSYS)
83313@@ -3478,7 +3478,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
83314 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
83315 * positive (at least 1, or number of jiffies left till timeout) if completed.
83316 */
83317-long __sched
83318+long __sched __intentional_overflow(-1)
83319 wait_for_completion_killable_timeout(struct completion *x,
83320 unsigned long timeout)
83321 {
83322@@ -3704,6 +3704,8 @@ int can_nice(const struct task_struct *p, const int nice)
83323 /* convert nice value [19,-20] to rlimit style value [1,40] */
83324 int nice_rlim = 20 - nice;
83325
83326+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
83327+
83328 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
83329 capable(CAP_SYS_NICE));
83330 }
83331@@ -3737,7 +3739,8 @@ SYSCALL_DEFINE1(nice, int, increment)
83332 if (nice > 19)
83333 nice = 19;
83334
83335- if (increment < 0 && !can_nice(current, nice))
83336+ if (increment < 0 && (!can_nice(current, nice) ||
83337+ gr_handle_chroot_nice()))
83338 return -EPERM;
83339
83340 retval = security_task_setnice(current, nice);
83341@@ -3891,6 +3894,7 @@ recheck:
83342 unsigned long rlim_rtprio =
83343 task_rlimit(p, RLIMIT_RTPRIO);
83344
83345+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
83346 /* can't set/change the rt policy */
83347 if (policy != p->policy && !rlim_rtprio)
83348 return -EPERM;
83349@@ -4988,7 +4992,7 @@ static void migrate_tasks(unsigned int dead_cpu)
83350
83351 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
83352
83353-static struct ctl_table sd_ctl_dir[] = {
83354+static ctl_table_no_const sd_ctl_dir[] __read_only = {
83355 {
83356 .procname = "sched_domain",
83357 .mode = 0555,
83358@@ -5005,17 +5009,17 @@ static struct ctl_table sd_ctl_root[] = {
83359 {}
83360 };
83361
83362-static struct ctl_table *sd_alloc_ctl_entry(int n)
83363+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
83364 {
83365- struct ctl_table *entry =
83366+ ctl_table_no_const *entry =
83367 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
83368
83369 return entry;
83370 }
83371
83372-static void sd_free_ctl_entry(struct ctl_table **tablep)
83373+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
83374 {
83375- struct ctl_table *entry;
83376+ ctl_table_no_const *entry;
83377
83378 /*
83379 * In the intermediate directories, both the child directory and
83380@@ -5023,22 +5027,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
83381 * will always be set. In the lowest directory the names are
83382 * static strings and all have proc handlers.
83383 */
83384- for (entry = *tablep; entry->mode; entry++) {
83385- if (entry->child)
83386- sd_free_ctl_entry(&entry->child);
83387+ for (entry = tablep; entry->mode; entry++) {
83388+ if (entry->child) {
83389+ sd_free_ctl_entry(entry->child);
83390+ pax_open_kernel();
83391+ entry->child = NULL;
83392+ pax_close_kernel();
83393+ }
83394 if (entry->proc_handler == NULL)
83395 kfree(entry->procname);
83396 }
83397
83398- kfree(*tablep);
83399- *tablep = NULL;
83400+ kfree(tablep);
83401 }
83402
83403 static int min_load_idx = 0;
83404 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
83405
83406 static void
83407-set_table_entry(struct ctl_table *entry,
83408+set_table_entry(ctl_table_no_const *entry,
83409 const char *procname, void *data, int maxlen,
83410 umode_t mode, proc_handler *proc_handler,
83411 bool load_idx)
83412@@ -5058,7 +5065,7 @@ set_table_entry(struct ctl_table *entry,
83413 static struct ctl_table *
83414 sd_alloc_ctl_domain_table(struct sched_domain *sd)
83415 {
83416- struct ctl_table *table = sd_alloc_ctl_entry(13);
83417+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
83418
83419 if (table == NULL)
83420 return NULL;
83421@@ -5093,9 +5100,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
83422 return table;
83423 }
83424
83425-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
83426+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
83427 {
83428- struct ctl_table *entry, *table;
83429+ ctl_table_no_const *entry, *table;
83430 struct sched_domain *sd;
83431 int domain_num = 0, i;
83432 char buf[32];
83433@@ -5122,11 +5129,13 @@ static struct ctl_table_header *sd_sysctl_header;
83434 static void register_sched_domain_sysctl(void)
83435 {
83436 int i, cpu_num = num_possible_cpus();
83437- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
83438+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
83439 char buf[32];
83440
83441 WARN_ON(sd_ctl_dir[0].child);
83442+ pax_open_kernel();
83443 sd_ctl_dir[0].child = entry;
83444+ pax_close_kernel();
83445
83446 if (entry == NULL)
83447 return;
83448@@ -5149,8 +5158,12 @@ static void unregister_sched_domain_sysctl(void)
83449 if (sd_sysctl_header)
83450 unregister_sysctl_table(sd_sysctl_header);
83451 sd_sysctl_header = NULL;
83452- if (sd_ctl_dir[0].child)
83453- sd_free_ctl_entry(&sd_ctl_dir[0].child);
83454+ if (sd_ctl_dir[0].child) {
83455+ sd_free_ctl_entry(sd_ctl_dir[0].child);
83456+ pax_open_kernel();
83457+ sd_ctl_dir[0].child = NULL;
83458+ pax_close_kernel();
83459+ }
83460 }
83461 #else
83462 static void register_sched_domain_sysctl(void)
83463@@ -5249,7 +5262,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
83464 * happens before everything else. This has to be lower priority than
83465 * the notifier in the perf_event subsystem, though.
83466 */
83467-static struct notifier_block __cpuinitdata migration_notifier = {
83468+static struct notifier_block migration_notifier = {
83469 .notifier_call = migration_call,
83470 .priority = CPU_PRI_MIGRATION,
83471 };
83472diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
83473index 03b73be..9422b9f 100644
83474--- a/kernel/sched/fair.c
83475+++ b/kernel/sched/fair.c
83476@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
83477
83478 static void reset_ptenuma_scan(struct task_struct *p)
83479 {
83480- ACCESS_ONCE(p->mm->numa_scan_seq)++;
83481+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
83482 p->mm->numa_scan_offset = 0;
83483 }
83484
83485@@ -5687,7 +5687,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
83486 * run_rebalance_domains is triggered when needed from the scheduler tick.
83487 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
83488 */
83489-static void run_rebalance_domains(struct softirq_action *h)
83490+static void run_rebalance_domains(void)
83491 {
83492 int this_cpu = smp_processor_id();
83493 struct rq *this_rq = cpu_rq(this_cpu);
83494diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
83495index ce39224d..0e09343 100644
83496--- a/kernel/sched/sched.h
83497+++ b/kernel/sched/sched.h
83498@@ -1009,7 +1009,7 @@ struct sched_class {
83499 #ifdef CONFIG_FAIR_GROUP_SCHED
83500 void (*task_move_group) (struct task_struct *p, int on_rq);
83501 #endif
83502-};
83503+} __do_const;
83504
83505 #define sched_class_highest (&stop_sched_class)
83506 #define for_each_class(class) \
83507diff --git a/kernel/signal.c b/kernel/signal.c
83508index 113411b..20d0a99 100644
83509--- a/kernel/signal.c
83510+++ b/kernel/signal.c
83511@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
83512
83513 int print_fatal_signals __read_mostly;
83514
83515-static void __user *sig_handler(struct task_struct *t, int sig)
83516+static __sighandler_t sig_handler(struct task_struct *t, int sig)
83517 {
83518 return t->sighand->action[sig - 1].sa.sa_handler;
83519 }
83520
83521-static int sig_handler_ignored(void __user *handler, int sig)
83522+static int sig_handler_ignored(__sighandler_t handler, int sig)
83523 {
83524 /* Is it explicitly or implicitly ignored? */
83525 return handler == SIG_IGN ||
83526@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
83527
83528 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
83529 {
83530- void __user *handler;
83531+ __sighandler_t handler;
83532
83533 handler = sig_handler(t, sig);
83534
83535@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
83536 atomic_inc(&user->sigpending);
83537 rcu_read_unlock();
83538
83539+ if (!override_rlimit)
83540+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
83541+
83542 if (override_rlimit ||
83543 atomic_read(&user->sigpending) <=
83544 task_rlimit(t, RLIMIT_SIGPENDING)) {
83545@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
83546
83547 int unhandled_signal(struct task_struct *tsk, int sig)
83548 {
83549- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
83550+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
83551 if (is_global_init(tsk))
83552 return 1;
83553 if (handler != SIG_IGN && handler != SIG_DFL)
83554@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
83555 }
83556 }
83557
83558+ /* allow glibc communication via tgkill to other threads in our
83559+ thread group */
83560+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
83561+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
83562+ && gr_handle_signal(t, sig))
83563+ return -EPERM;
83564+
83565 return security_task_kill(t, info, sig, 0);
83566 }
83567
83568@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
83569 return send_signal(sig, info, p, 1);
83570 }
83571
83572-static int
83573+int
83574 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83575 {
83576 return send_signal(sig, info, t, 0);
83577@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83578 unsigned long int flags;
83579 int ret, blocked, ignored;
83580 struct k_sigaction *action;
83581+ int is_unhandled = 0;
83582
83583 spin_lock_irqsave(&t->sighand->siglock, flags);
83584 action = &t->sighand->action[sig-1];
83585@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83586 }
83587 if (action->sa.sa_handler == SIG_DFL)
83588 t->signal->flags &= ~SIGNAL_UNKILLABLE;
83589+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
83590+ is_unhandled = 1;
83591 ret = specific_send_sig_info(sig, info, t);
83592 spin_unlock_irqrestore(&t->sighand->siglock, flags);
83593
83594+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
83595+ normal operation */
83596+ if (is_unhandled) {
83597+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
83598+ gr_handle_crash(t, sig);
83599+ }
83600+
83601 return ret;
83602 }
83603
83604@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
83605 ret = check_kill_permission(sig, info, p);
83606 rcu_read_unlock();
83607
83608- if (!ret && sig)
83609+ if (!ret && sig) {
83610 ret = do_send_sig_info(sig, info, p, true);
83611+ if (!ret)
83612+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
83613+ }
83614
83615 return ret;
83616 }
83617@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
83618 int error = -ESRCH;
83619
83620 rcu_read_lock();
83621- p = find_task_by_vpid(pid);
83622+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83623+ /* allow glibc communication via tgkill to other threads in our
83624+ thread group */
83625+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
83626+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
83627+ p = find_task_by_vpid_unrestricted(pid);
83628+ else
83629+#endif
83630+ p = find_task_by_vpid(pid);
83631 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
83632 error = check_kill_permission(sig, info, p);
83633 /*
83634@@ -3219,6 +3250,16 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
83635 __put_user(t->sas_ss_size, &uss->ss_size);
83636 }
83637
83638+#ifdef CONFIG_X86
83639+void __save_altstack_ex(stack_t __user *uss, unsigned long sp)
83640+{
83641+ struct task_struct *t = current;
83642+ put_user_ex((void __user *)t->sas_ss_sp, &uss->ss_sp);
83643+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
83644+ put_user_ex(t->sas_ss_size, &uss->ss_size);
83645+}
83646+#endif
83647+
83648 #ifdef CONFIG_COMPAT
83649 COMPAT_SYSCALL_DEFINE2(sigaltstack,
83650 const compat_stack_t __user *, uss_ptr,
83651@@ -3240,8 +3281,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
83652 }
83653 seg = get_fs();
83654 set_fs(KERNEL_DS);
83655- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
83656- (stack_t __force __user *) &uoss,
83657+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
83658+ (stack_t __force_user *) &uoss,
83659 compat_user_stack_pointer());
83660 set_fs(seg);
83661 if (ret >= 0 && uoss_ptr) {
83662@@ -3268,6 +3309,16 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
83663 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
83664 __put_user(t->sas_ss_size, &uss->ss_size);
83665 }
83666+
83667+#ifdef CONFIG_X86
83668+void __compat_save_altstack_ex(compat_stack_t __user *uss, unsigned long sp)
83669+{
83670+ struct task_struct *t = current;
83671+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp);
83672+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
83673+ put_user_ex(t->sas_ss_size, &uss->ss_size);
83674+}
83675+#endif
83676 #endif
83677
83678 #ifdef __ARCH_WANT_SYS_SIGPENDING
83679diff --git a/kernel/smp.c b/kernel/smp.c
83680index 4dba0f7..fe9f773 100644
83681--- a/kernel/smp.c
83682+++ b/kernel/smp.c
83683@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
83684 return NOTIFY_OK;
83685 }
83686
83687-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
83688+static struct notifier_block hotplug_cfd_notifier = {
83689 .notifier_call = hotplug_cfd,
83690 };
83691
83692diff --git a/kernel/smpboot.c b/kernel/smpboot.c
83693index 02fc5c9..e54c335 100644
83694--- a/kernel/smpboot.c
83695+++ b/kernel/smpboot.c
83696@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
83697 }
83698 smpboot_unpark_thread(plug_thread, cpu);
83699 }
83700- list_add(&plug_thread->list, &hotplug_threads);
83701+ pax_list_add(&plug_thread->list, &hotplug_threads);
83702 out:
83703 mutex_unlock(&smpboot_threads_lock);
83704 return ret;
83705@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
83706 {
83707 get_online_cpus();
83708 mutex_lock(&smpboot_threads_lock);
83709- list_del(&plug_thread->list);
83710+ pax_list_del(&plug_thread->list);
83711 smpboot_destroy_threads(plug_thread);
83712 mutex_unlock(&smpboot_threads_lock);
83713 put_online_cpus();
83714diff --git a/kernel/softirq.c b/kernel/softirq.c
83715index 3d6833f..da6d93d 100644
83716--- a/kernel/softirq.c
83717+++ b/kernel/softirq.c
83718@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
83719 EXPORT_SYMBOL(irq_stat);
83720 #endif
83721
83722-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
83723+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
83724
83725 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
83726
83727-char *softirq_to_name[NR_SOFTIRQS] = {
83728+const char * const softirq_to_name[NR_SOFTIRQS] = {
83729 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
83730 "TASKLET", "SCHED", "HRTIMER", "RCU"
83731 };
83732@@ -250,7 +250,7 @@ restart:
83733 kstat_incr_softirqs_this_cpu(vec_nr);
83734
83735 trace_softirq_entry(vec_nr);
83736- h->action(h);
83737+ h->action();
83738 trace_softirq_exit(vec_nr);
83739 if (unlikely(prev_count != preempt_count())) {
83740 printk(KERN_ERR "huh, entered softirq %u %s %p"
83741@@ -405,7 +405,7 @@ void __raise_softirq_irqoff(unsigned int nr)
83742 or_softirq_pending(1UL << nr);
83743 }
83744
83745-void open_softirq(int nr, void (*action)(struct softirq_action *))
83746+void __init open_softirq(int nr, void (*action)(void))
83747 {
83748 softirq_vec[nr].action = action;
83749 }
83750@@ -461,7 +461,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
83751
83752 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
83753
83754-static void tasklet_action(struct softirq_action *a)
83755+static void tasklet_action(void)
83756 {
83757 struct tasklet_struct *list;
83758
83759@@ -496,7 +496,7 @@ static void tasklet_action(struct softirq_action *a)
83760 }
83761 }
83762
83763-static void tasklet_hi_action(struct softirq_action *a)
83764+static void tasklet_hi_action(void)
83765 {
83766 struct tasklet_struct *list;
83767
83768@@ -730,7 +730,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
83769 return NOTIFY_OK;
83770 }
83771
83772-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
83773+static struct notifier_block remote_softirq_cpu_notifier = {
83774 .notifier_call = remote_softirq_cpu_notify,
83775 };
83776
83777@@ -847,11 +847,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
83778 return NOTIFY_OK;
83779 }
83780
83781-static struct notifier_block __cpuinitdata cpu_nfb = {
83782+static struct notifier_block cpu_nfb = {
83783 .notifier_call = cpu_callback
83784 };
83785
83786-static struct smp_hotplug_thread softirq_threads = {
83787+static struct smp_hotplug_thread softirq_threads __read_only = {
83788 .store = &ksoftirqd,
83789 .thread_should_run = ksoftirqd_should_run,
83790 .thread_fn = run_ksoftirqd,
83791diff --git a/kernel/srcu.c b/kernel/srcu.c
83792index 01d5ccb..cdcbee6 100644
83793--- a/kernel/srcu.c
83794+++ b/kernel/srcu.c
83795@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
83796
83797 idx = ACCESS_ONCE(sp->completed) & 0x1;
83798 preempt_disable();
83799- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
83800+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
83801 smp_mb(); /* B */ /* Avoid leaking the critical section. */
83802- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
83803+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
83804 preempt_enable();
83805 return idx;
83806 }
83807diff --git a/kernel/sys.c b/kernel/sys.c
83808index 2bbd9a7..0875671 100644
83809--- a/kernel/sys.c
83810+++ b/kernel/sys.c
83811@@ -163,6 +163,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
83812 error = -EACCES;
83813 goto out;
83814 }
83815+
83816+ if (gr_handle_chroot_setpriority(p, niceval)) {
83817+ error = -EACCES;
83818+ goto out;
83819+ }
83820+
83821 no_nice = security_task_setnice(p, niceval);
83822 if (no_nice) {
83823 error = no_nice;
83824@@ -626,6 +632,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
83825 goto error;
83826 }
83827
83828+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
83829+ goto error;
83830+
83831 if (rgid != (gid_t) -1 ||
83832 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
83833 new->sgid = new->egid;
83834@@ -661,6 +670,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
83835 old = current_cred();
83836
83837 retval = -EPERM;
83838+
83839+ if (gr_check_group_change(kgid, kgid, kgid))
83840+ goto error;
83841+
83842 if (nsown_capable(CAP_SETGID))
83843 new->gid = new->egid = new->sgid = new->fsgid = kgid;
83844 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
83845@@ -678,7 +691,7 @@ error:
83846 /*
83847 * change the user struct in a credentials set to match the new UID
83848 */
83849-static int set_user(struct cred *new)
83850+int set_user(struct cred *new)
83851 {
83852 struct user_struct *new_user;
83853
83854@@ -758,6 +771,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
83855 goto error;
83856 }
83857
83858+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
83859+ goto error;
83860+
83861 if (!uid_eq(new->uid, old->uid)) {
83862 retval = set_user(new);
83863 if (retval < 0)
83864@@ -808,6 +824,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
83865 old = current_cred();
83866
83867 retval = -EPERM;
83868+
83869+ if (gr_check_crash_uid(kuid))
83870+ goto error;
83871+ if (gr_check_user_change(kuid, kuid, kuid))
83872+ goto error;
83873+
83874 if (nsown_capable(CAP_SETUID)) {
83875 new->suid = new->uid = kuid;
83876 if (!uid_eq(kuid, old->uid)) {
83877@@ -877,6 +899,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
83878 goto error;
83879 }
83880
83881+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
83882+ goto error;
83883+
83884 if (ruid != (uid_t) -1) {
83885 new->uid = kruid;
83886 if (!uid_eq(kruid, old->uid)) {
83887@@ -959,6 +984,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
83888 goto error;
83889 }
83890
83891+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
83892+ goto error;
83893+
83894 if (rgid != (gid_t) -1)
83895 new->gid = krgid;
83896 if (egid != (gid_t) -1)
83897@@ -1020,12 +1048,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
83898 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
83899 nsown_capable(CAP_SETUID)) {
83900 if (!uid_eq(kuid, old->fsuid)) {
83901+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
83902+ goto error;
83903+
83904 new->fsuid = kuid;
83905 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
83906 goto change_okay;
83907 }
83908 }
83909
83910+error:
83911 abort_creds(new);
83912 return old_fsuid;
83913
83914@@ -1058,12 +1090,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
83915 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
83916 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
83917 nsown_capable(CAP_SETGID)) {
83918+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
83919+ goto error;
83920+
83921 if (!gid_eq(kgid, old->fsgid)) {
83922 new->fsgid = kgid;
83923 goto change_okay;
83924 }
83925 }
83926
83927+error:
83928 abort_creds(new);
83929 return old_fsgid;
83930
83931@@ -1432,19 +1468,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
83932 return -EFAULT;
83933
83934 down_read(&uts_sem);
83935- error = __copy_to_user(&name->sysname, &utsname()->sysname,
83936+ error = __copy_to_user(name->sysname, &utsname()->sysname,
83937 __OLD_UTS_LEN);
83938 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
83939- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
83940+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
83941 __OLD_UTS_LEN);
83942 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
83943- error |= __copy_to_user(&name->release, &utsname()->release,
83944+ error |= __copy_to_user(name->release, &utsname()->release,
83945 __OLD_UTS_LEN);
83946 error |= __put_user(0, name->release + __OLD_UTS_LEN);
83947- error |= __copy_to_user(&name->version, &utsname()->version,
83948+ error |= __copy_to_user(name->version, &utsname()->version,
83949 __OLD_UTS_LEN);
83950 error |= __put_user(0, name->version + __OLD_UTS_LEN);
83951- error |= __copy_to_user(&name->machine, &utsname()->machine,
83952+ error |= __copy_to_user(name->machine, &utsname()->machine,
83953 __OLD_UTS_LEN);
83954 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
83955 up_read(&uts_sem);
83956@@ -1646,6 +1682,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
83957 */
83958 new_rlim->rlim_cur = 1;
83959 }
83960+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
83961+ is changed to a lower value. Since tasks can be created by the same
83962+ user in between this limit change and an execve by this task, force
83963+ a recheck only for this task by setting PF_NPROC_EXCEEDED
83964+ */
83965+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
83966+ tsk->flags |= PF_NPROC_EXCEEDED;
83967 }
83968 if (!retval) {
83969 if (old_rlim)
83970diff --git a/kernel/sysctl.c b/kernel/sysctl.c
83971index 9edcf45..713c960 100644
83972--- a/kernel/sysctl.c
83973+++ b/kernel/sysctl.c
83974@@ -93,7 +93,6 @@
83975
83976
83977 #if defined(CONFIG_SYSCTL)
83978-
83979 /* External variables not in a header file. */
83980 extern int sysctl_overcommit_memory;
83981 extern int sysctl_overcommit_ratio;
83982@@ -119,18 +118,18 @@ extern int blk_iopoll_enabled;
83983
83984 /* Constants used for minimum and maximum */
83985 #ifdef CONFIG_LOCKUP_DETECTOR
83986-static int sixty = 60;
83987-static int neg_one = -1;
83988+static int sixty __read_only = 60;
83989 #endif
83990
83991-static int zero;
83992-static int __maybe_unused one = 1;
83993-static int __maybe_unused two = 2;
83994-static int __maybe_unused three = 3;
83995-static unsigned long one_ul = 1;
83996-static int one_hundred = 100;
83997+static int neg_one __read_only = -1;
83998+static int zero __read_only = 0;
83999+static int __maybe_unused one __read_only = 1;
84000+static int __maybe_unused two __read_only = 2;
84001+static int __maybe_unused three __read_only = 3;
84002+static unsigned long one_ul __read_only = 1;
84003+static int one_hundred __read_only = 100;
84004 #ifdef CONFIG_PRINTK
84005-static int ten_thousand = 10000;
84006+static int ten_thousand __read_only = 10000;
84007 #endif
84008
84009 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
84010@@ -177,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
84011 void __user *buffer, size_t *lenp, loff_t *ppos);
84012 #endif
84013
84014-#ifdef CONFIG_PRINTK
84015 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84016 void __user *buffer, size_t *lenp, loff_t *ppos);
84017-#endif
84018
84019 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
84020 void __user *buffer, size_t *lenp, loff_t *ppos);
84021@@ -211,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
84022
84023 #endif
84024
84025+extern struct ctl_table grsecurity_table[];
84026+
84027 static struct ctl_table kern_table[];
84028 static struct ctl_table vm_table[];
84029 static struct ctl_table fs_table[];
84030@@ -225,6 +224,20 @@ extern struct ctl_table epoll_table[];
84031 int sysctl_legacy_va_layout;
84032 #endif
84033
84034+#ifdef CONFIG_PAX_SOFTMODE
84035+static ctl_table pax_table[] = {
84036+ {
84037+ .procname = "softmode",
84038+ .data = &pax_softmode,
84039+ .maxlen = sizeof(unsigned int),
84040+ .mode = 0600,
84041+ .proc_handler = &proc_dointvec,
84042+ },
84043+
84044+ { }
84045+};
84046+#endif
84047+
84048 /* The default sysctl tables: */
84049
84050 static struct ctl_table sysctl_base_table[] = {
84051@@ -273,6 +286,22 @@ static int max_extfrag_threshold = 1000;
84052 #endif
84053
84054 static struct ctl_table kern_table[] = {
84055+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
84056+ {
84057+ .procname = "grsecurity",
84058+ .mode = 0500,
84059+ .child = grsecurity_table,
84060+ },
84061+#endif
84062+
84063+#ifdef CONFIG_PAX_SOFTMODE
84064+ {
84065+ .procname = "pax",
84066+ .mode = 0500,
84067+ .child = pax_table,
84068+ },
84069+#endif
84070+
84071 {
84072 .procname = "sched_child_runs_first",
84073 .data = &sysctl_sched_child_runs_first,
84074@@ -607,7 +636,7 @@ static struct ctl_table kern_table[] = {
84075 .data = &modprobe_path,
84076 .maxlen = KMOD_PATH_LEN,
84077 .mode = 0644,
84078- .proc_handler = proc_dostring,
84079+ .proc_handler = proc_dostring_modpriv,
84080 },
84081 {
84082 .procname = "modules_disabled",
84083@@ -774,16 +803,20 @@ static struct ctl_table kern_table[] = {
84084 .extra1 = &zero,
84085 .extra2 = &one,
84086 },
84087+#endif
84088 {
84089 .procname = "kptr_restrict",
84090 .data = &kptr_restrict,
84091 .maxlen = sizeof(int),
84092 .mode = 0644,
84093 .proc_handler = proc_dointvec_minmax_sysadmin,
84094+#ifdef CONFIG_GRKERNSEC_HIDESYM
84095+ .extra1 = &two,
84096+#else
84097 .extra1 = &zero,
84098+#endif
84099 .extra2 = &two,
84100 },
84101-#endif
84102 {
84103 .procname = "ngroups_max",
84104 .data = &ngroups_max,
84105@@ -1025,10 +1058,17 @@ static struct ctl_table kern_table[] = {
84106 */
84107 {
84108 .procname = "perf_event_paranoid",
84109- .data = &sysctl_perf_event_paranoid,
84110- .maxlen = sizeof(sysctl_perf_event_paranoid),
84111+ .data = &sysctl_perf_event_legitimately_concerned,
84112+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
84113 .mode = 0644,
84114- .proc_handler = proc_dointvec,
84115+ /* go ahead, be a hero */
84116+ .proc_handler = proc_dointvec_minmax_sysadmin,
84117+ .extra1 = &neg_one,
84118+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
84119+ .extra2 = &three,
84120+#else
84121+ .extra2 = &two,
84122+#endif
84123 },
84124 {
84125 .procname = "perf_event_mlock_kb",
84126@@ -1282,6 +1322,13 @@ static struct ctl_table vm_table[] = {
84127 .proc_handler = proc_dointvec_minmax,
84128 .extra1 = &zero,
84129 },
84130+ {
84131+ .procname = "heap_stack_gap",
84132+ .data = &sysctl_heap_stack_gap,
84133+ .maxlen = sizeof(sysctl_heap_stack_gap),
84134+ .mode = 0644,
84135+ .proc_handler = proc_doulongvec_minmax,
84136+ },
84137 #else
84138 {
84139 .procname = "nr_trim_pages",
84140@@ -1746,6 +1793,16 @@ int proc_dostring(struct ctl_table *table, int write,
84141 buffer, lenp, ppos);
84142 }
84143
84144+int proc_dostring_modpriv(struct ctl_table *table, int write,
84145+ void __user *buffer, size_t *lenp, loff_t *ppos)
84146+{
84147+ if (write && !capable(CAP_SYS_MODULE))
84148+ return -EPERM;
84149+
84150+ return _proc_do_string(table->data, table->maxlen, write,
84151+ buffer, lenp, ppos);
84152+}
84153+
84154 static size_t proc_skip_spaces(char **buf)
84155 {
84156 size_t ret;
84157@@ -1851,6 +1908,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
84158 len = strlen(tmp);
84159 if (len > *size)
84160 len = *size;
84161+ if (len > sizeof(tmp))
84162+ len = sizeof(tmp);
84163 if (copy_to_user(*buf, tmp, len))
84164 return -EFAULT;
84165 *size -= len;
84166@@ -2015,7 +2074,7 @@ int proc_dointvec(struct ctl_table *table, int write,
84167 static int proc_taint(struct ctl_table *table, int write,
84168 void __user *buffer, size_t *lenp, loff_t *ppos)
84169 {
84170- struct ctl_table t;
84171+ ctl_table_no_const t;
84172 unsigned long tmptaint = get_taint();
84173 int err;
84174
84175@@ -2043,7 +2102,6 @@ static int proc_taint(struct ctl_table *table, int write,
84176 return err;
84177 }
84178
84179-#ifdef CONFIG_PRINTK
84180 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84181 void __user *buffer, size_t *lenp, loff_t *ppos)
84182 {
84183@@ -2052,7 +2110,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84184
84185 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
84186 }
84187-#endif
84188
84189 struct do_proc_dointvec_minmax_conv_param {
84190 int *min;
84191@@ -2199,8 +2256,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
84192 *i = val;
84193 } else {
84194 val = convdiv * (*i) / convmul;
84195- if (!first)
84196+ if (!first) {
84197 err = proc_put_char(&buffer, &left, '\t');
84198+ if (err)
84199+ break;
84200+ }
84201 err = proc_put_long(&buffer, &left, val, false);
84202 if (err)
84203 break;
84204@@ -2592,6 +2652,12 @@ int proc_dostring(struct ctl_table *table, int write,
84205 return -ENOSYS;
84206 }
84207
84208+int proc_dostring_modpriv(struct ctl_table *table, int write,
84209+ void __user *buffer, size_t *lenp, loff_t *ppos)
84210+{
84211+ return -ENOSYS;
84212+}
84213+
84214 int proc_dointvec(struct ctl_table *table, int write,
84215 void __user *buffer, size_t *lenp, loff_t *ppos)
84216 {
84217@@ -2648,5 +2714,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
84218 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
84219 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
84220 EXPORT_SYMBOL(proc_dostring);
84221+EXPORT_SYMBOL(proc_dostring_modpriv);
84222 EXPORT_SYMBOL(proc_doulongvec_minmax);
84223 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
84224diff --git a/kernel/taskstats.c b/kernel/taskstats.c
84225index 145bb4d..b2aa969 100644
84226--- a/kernel/taskstats.c
84227+++ b/kernel/taskstats.c
84228@@ -28,9 +28,12 @@
84229 #include <linux/fs.h>
84230 #include <linux/file.h>
84231 #include <linux/pid_namespace.h>
84232+#include <linux/grsecurity.h>
84233 #include <net/genetlink.h>
84234 #include <linux/atomic.h>
84235
84236+extern int gr_is_taskstats_denied(int pid);
84237+
84238 /*
84239 * Maximum length of a cpumask that can be specified in
84240 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
84241@@ -570,6 +573,9 @@ err:
84242
84243 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
84244 {
84245+ if (gr_is_taskstats_denied(current->pid))
84246+ return -EACCES;
84247+
84248 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
84249 return cmd_attr_register_cpumask(info);
84250 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
84251diff --git a/kernel/time.c b/kernel/time.c
84252index d3617db..c98bbe9 100644
84253--- a/kernel/time.c
84254+++ b/kernel/time.c
84255@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
84256 return error;
84257
84258 if (tz) {
84259+ /* we log in do_settimeofday called below, so don't log twice
84260+ */
84261+ if (!tv)
84262+ gr_log_timechange();
84263+
84264 sys_tz = *tz;
84265 update_vsyscall_tz();
84266 if (firsttime) {
84267@@ -502,7 +507,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
84268 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
84269 * value to a scaled second value.
84270 */
84271-unsigned long
84272+unsigned long __intentional_overflow(-1)
84273 timespec_to_jiffies(const struct timespec *value)
84274 {
84275 unsigned long sec = value->tv_sec;
84276diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
84277index f11d83b..d016d91 100644
84278--- a/kernel/time/alarmtimer.c
84279+++ b/kernel/time/alarmtimer.c
84280@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
84281 struct platform_device *pdev;
84282 int error = 0;
84283 int i;
84284- struct k_clock alarm_clock = {
84285+ static struct k_clock alarm_clock = {
84286 .clock_getres = alarm_clock_getres,
84287 .clock_get = alarm_clock_get,
84288 .timer_create = alarm_timer_create,
84289diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
84290index baeeb5c..c22704a 100644
84291--- a/kernel/time/timekeeping.c
84292+++ b/kernel/time/timekeeping.c
84293@@ -15,6 +15,7 @@
84294 #include <linux/init.h>
84295 #include <linux/mm.h>
84296 #include <linux/sched.h>
84297+#include <linux/grsecurity.h>
84298 #include <linux/syscore_ops.h>
84299 #include <linux/clocksource.h>
84300 #include <linux/jiffies.h>
84301@@ -495,6 +496,8 @@ int do_settimeofday(const struct timespec *tv)
84302 if (!timespec_valid_strict(tv))
84303 return -EINVAL;
84304
84305+ gr_log_timechange();
84306+
84307 raw_spin_lock_irqsave(&timekeeper_lock, flags);
84308 write_seqcount_begin(&timekeeper_seq);
84309
84310diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
84311index 3bdf283..cc68d83 100644
84312--- a/kernel/time/timer_list.c
84313+++ b/kernel/time/timer_list.c
84314@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
84315
84316 static void print_name_offset(struct seq_file *m, void *sym)
84317 {
84318+#ifdef CONFIG_GRKERNSEC_HIDESYM
84319+ SEQ_printf(m, "<%p>", NULL);
84320+#else
84321 char symname[KSYM_NAME_LEN];
84322
84323 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
84324 SEQ_printf(m, "<%pK>", sym);
84325 else
84326 SEQ_printf(m, "%s", symname);
84327+#endif
84328 }
84329
84330 static void
84331@@ -119,7 +123,11 @@ next_one:
84332 static void
84333 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
84334 {
84335+#ifdef CONFIG_GRKERNSEC_HIDESYM
84336+ SEQ_printf(m, " .base: %p\n", NULL);
84337+#else
84338 SEQ_printf(m, " .base: %pK\n", base);
84339+#endif
84340 SEQ_printf(m, " .index: %d\n",
84341 base->index);
84342 SEQ_printf(m, " .resolution: %Lu nsecs\n",
84343@@ -355,7 +363,11 @@ static int __init init_timer_list_procfs(void)
84344 {
84345 struct proc_dir_entry *pe;
84346
84347+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84348+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
84349+#else
84350 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
84351+#endif
84352 if (!pe)
84353 return -ENOMEM;
84354 return 0;
84355diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
84356index 0b537f2..40d6c20 100644
84357--- a/kernel/time/timer_stats.c
84358+++ b/kernel/time/timer_stats.c
84359@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
84360 static unsigned long nr_entries;
84361 static struct entry entries[MAX_ENTRIES];
84362
84363-static atomic_t overflow_count;
84364+static atomic_unchecked_t overflow_count;
84365
84366 /*
84367 * The entries are in a hash-table, for fast lookup:
84368@@ -140,7 +140,7 @@ static void reset_entries(void)
84369 nr_entries = 0;
84370 memset(entries, 0, sizeof(entries));
84371 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
84372- atomic_set(&overflow_count, 0);
84373+ atomic_set_unchecked(&overflow_count, 0);
84374 }
84375
84376 static struct entry *alloc_entry(void)
84377@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
84378 if (likely(entry))
84379 entry->count++;
84380 else
84381- atomic_inc(&overflow_count);
84382+ atomic_inc_unchecked(&overflow_count);
84383
84384 out_unlock:
84385 raw_spin_unlock_irqrestore(lock, flags);
84386@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
84387
84388 static void print_name_offset(struct seq_file *m, unsigned long addr)
84389 {
84390+#ifdef CONFIG_GRKERNSEC_HIDESYM
84391+ seq_printf(m, "<%p>", NULL);
84392+#else
84393 char symname[KSYM_NAME_LEN];
84394
84395 if (lookup_symbol_name(addr, symname) < 0)
84396- seq_printf(m, "<%p>", (void *)addr);
84397+ seq_printf(m, "<%pK>", (void *)addr);
84398 else
84399 seq_printf(m, "%s", symname);
84400+#endif
84401 }
84402
84403 static int tstats_show(struct seq_file *m, void *v)
84404@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
84405
84406 seq_puts(m, "Timer Stats Version: v0.2\n");
84407 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
84408- if (atomic_read(&overflow_count))
84409+ if (atomic_read_unchecked(&overflow_count))
84410 seq_printf(m, "Overflow: %d entries\n",
84411- atomic_read(&overflow_count));
84412+ atomic_read_unchecked(&overflow_count));
84413
84414 for (i = 0; i < nr_entries; i++) {
84415 entry = entries + i;
84416@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
84417 {
84418 struct proc_dir_entry *pe;
84419
84420+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84421+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
84422+#else
84423 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
84424+#endif
84425 if (!pe)
84426 return -ENOMEM;
84427 return 0;
84428diff --git a/kernel/timer.c b/kernel/timer.c
84429index 15bc1b4..32da49c 100644
84430--- a/kernel/timer.c
84431+++ b/kernel/timer.c
84432@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
84433 /*
84434 * This function runs timers and the timer-tq in bottom half context.
84435 */
84436-static void run_timer_softirq(struct softirq_action *h)
84437+static void run_timer_softirq(void)
84438 {
84439 struct tvec_base *base = __this_cpu_read(tvec_bases);
84440
84441@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
84442 *
84443 * In all cases the return value is guaranteed to be non-negative.
84444 */
84445-signed long __sched schedule_timeout(signed long timeout)
84446+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
84447 {
84448 struct timer_list timer;
84449 unsigned long expire;
84450@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
84451 return NOTIFY_OK;
84452 }
84453
84454-static struct notifier_block __cpuinitdata timers_nb = {
84455+static struct notifier_block timers_nb = {
84456 .notifier_call = timer_cpu_notify,
84457 };
84458
84459diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
84460index b8b8560..75b1a09 100644
84461--- a/kernel/trace/blktrace.c
84462+++ b/kernel/trace/blktrace.c
84463@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
84464 struct blk_trace *bt = filp->private_data;
84465 char buf[16];
84466
84467- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
84468+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
84469
84470 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
84471 }
84472@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
84473 return 1;
84474
84475 bt = buf->chan->private_data;
84476- atomic_inc(&bt->dropped);
84477+ atomic_inc_unchecked(&bt->dropped);
84478 return 0;
84479 }
84480
84481@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
84482
84483 bt->dir = dir;
84484 bt->dev = dev;
84485- atomic_set(&bt->dropped, 0);
84486+ atomic_set_unchecked(&bt->dropped, 0);
84487
84488 ret = -EIO;
84489 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
84490diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
e2b79cd1 84491index f23449d..b8cc3a1 100644
bb5f0bf8
AF
84492--- a/kernel/trace/ftrace.c
84493+++ b/kernel/trace/ftrace.c
e2b79cd1 84494@@ -1925,12 +1925,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
bb5f0bf8
AF
84495 if (unlikely(ftrace_disabled))
84496 return 0;
84497
84498+ ret = ftrace_arch_code_modify_prepare();
84499+ FTRACE_WARN_ON(ret);
84500+ if (ret)
84501+ return 0;
84502+
84503 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
84504+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
84505 if (ret) {
84506 ftrace_bug(ret, ip);
84507- return 0;
84508 }
84509- return 1;
84510+ return ret ? 0 : 1;
84511 }
84512
84513 /*
e2b79cd1 84514@@ -3994,8 +3999,10 @@ static int ftrace_process_locs(struct module *mod,
bb5f0bf8
AF
84515 if (!count)
84516 return 0;
84517
84518+ pax_open_kernel();
84519 sort(start, count, sizeof(*start),
84520 ftrace_cmp_ips, ftrace_swap_ips);
84521+ pax_close_kernel();
84522
84523 start_pg = ftrace_allocate_pages(count);
84524 if (!start_pg)
e2b79cd1 84525@@ -4718,8 +4725,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
bb5f0bf8
AF
84526 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
84527
84528 static int ftrace_graph_active;
84529-static struct notifier_block ftrace_suspend_notifier;
84530-
84531 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
84532 {
84533 return 0;
e2b79cd1 84534@@ -4863,6 +4868,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
bb5f0bf8
AF
84535 return NOTIFY_DONE;
84536 }
84537
84538+static struct notifier_block ftrace_suspend_notifier = {
84539+ .notifier_call = ftrace_suspend_notifier_call
84540+};
84541+
84542 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
84543 trace_func_graph_ent_t entryfunc)
84544 {
e2b79cd1 84545@@ -4876,7 +4885,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
bb5f0bf8
AF
84546 goto out;
84547 }
84548
84549- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
84550 register_pm_notifier(&ftrace_suspend_notifier);
84551
84552 ftrace_graph_active++;
84553diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
84554index e444ff8..438b8f4 100644
84555--- a/kernel/trace/ring_buffer.c
84556+++ b/kernel/trace/ring_buffer.c
84557@@ -352,9 +352,9 @@ struct buffer_data_page {
84558 */
84559 struct buffer_page {
84560 struct list_head list; /* list of buffer pages */
84561- local_t write; /* index for next write */
84562+ local_unchecked_t write; /* index for next write */
84563 unsigned read; /* index for next read */
84564- local_t entries; /* entries on this page */
84565+ local_unchecked_t entries; /* entries on this page */
84566 unsigned long real_end; /* real end of data */
84567 struct buffer_data_page *page; /* Actual data page */
84568 };
84569@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
84570 unsigned long last_overrun;
84571 local_t entries_bytes;
84572 local_t entries;
84573- local_t overrun;
84574- local_t commit_overrun;
84575+ local_unchecked_t overrun;
84576+ local_unchecked_t commit_overrun;
84577 local_t dropped_events;
84578 local_t committing;
84579 local_t commits;
84580@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
84581 *
84582 * We add a counter to the write field to denote this.
84583 */
84584- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
84585- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
84586+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
84587+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
84588
84589 /*
84590 * Just make sure we have seen our old_write and synchronize
84591@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
84592 * cmpxchg to only update if an interrupt did not already
84593 * do it for us. If the cmpxchg fails, we don't care.
84594 */
84595- (void)local_cmpxchg(&next_page->write, old_write, val);
84596- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
84597+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
84598+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
84599
84600 /*
84601 * No need to worry about races with clearing out the commit.
84602@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
84603
84604 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
84605 {
84606- return local_read(&bpage->entries) & RB_WRITE_MASK;
84607+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
84608 }
84609
84610 static inline unsigned long rb_page_write(struct buffer_page *bpage)
84611 {
84612- return local_read(&bpage->write) & RB_WRITE_MASK;
84613+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
84614 }
84615
84616 static int
84617@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
84618 * bytes consumed in ring buffer from here.
84619 * Increment overrun to account for the lost events.
84620 */
84621- local_add(page_entries, &cpu_buffer->overrun);
84622+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
84623 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
84624 }
84625
84626@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
84627 * it is our responsibility to update
84628 * the counters.
84629 */
84630- local_add(entries, &cpu_buffer->overrun);
84631+ local_add_unchecked(entries, &cpu_buffer->overrun);
84632 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
84633
84634 /*
84635@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84636 if (tail == BUF_PAGE_SIZE)
84637 tail_page->real_end = 0;
84638
84639- local_sub(length, &tail_page->write);
84640+ local_sub_unchecked(length, &tail_page->write);
84641 return;
84642 }
84643
84644@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84645 rb_event_set_padding(event);
84646
84647 /* Set the write back to the previous setting */
84648- local_sub(length, &tail_page->write);
84649+ local_sub_unchecked(length, &tail_page->write);
84650 return;
84651 }
84652
84653@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84654
84655 /* Set write to end of buffer */
84656 length = (tail + length) - BUF_PAGE_SIZE;
84657- local_sub(length, &tail_page->write);
84658+ local_sub_unchecked(length, &tail_page->write);
84659 }
84660
84661 /*
84662@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
84663 * about it.
84664 */
84665 if (unlikely(next_page == commit_page)) {
84666- local_inc(&cpu_buffer->commit_overrun);
84667+ local_inc_unchecked(&cpu_buffer->commit_overrun);
84668 goto out_reset;
84669 }
84670
84671@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
84672 cpu_buffer->tail_page) &&
84673 (cpu_buffer->commit_page ==
84674 cpu_buffer->reader_page))) {
84675- local_inc(&cpu_buffer->commit_overrun);
84676+ local_inc_unchecked(&cpu_buffer->commit_overrun);
84677 goto out_reset;
84678 }
84679 }
84680@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
84681 length += RB_LEN_TIME_EXTEND;
84682
84683 tail_page = cpu_buffer->tail_page;
84684- write = local_add_return(length, &tail_page->write);
84685+ write = local_add_return_unchecked(length, &tail_page->write);
84686
84687 /* set write to only the index of the write */
84688 write &= RB_WRITE_MASK;
84689@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
84690 kmemcheck_annotate_bitfield(event, bitfield);
84691 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
84692
84693- local_inc(&tail_page->entries);
84694+ local_inc_unchecked(&tail_page->entries);
84695
84696 /*
84697 * If this is the first commit on the page, then update
84698@@ -2440,7 +2440,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
84699
84700 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
84701 unsigned long write_mask =
84702- local_read(&bpage->write) & ~RB_WRITE_MASK;
84703+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
84704 unsigned long event_length = rb_event_length(event);
84705 /*
84706 * This is on the tail page. It is possible that
84707@@ -2450,7 +2450,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
84708 */
84709 old_index += write_mask;
84710 new_index += write_mask;
84711- index = local_cmpxchg(&bpage->write, old_index, new_index);
84712+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
84713 if (index == old_index) {
84714 /* update counters */
84715 local_sub(event_length, &cpu_buffer->entries_bytes);
84716@@ -2842,7 +2842,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
84717
84718 /* Do the likely case first */
84719 if (likely(bpage->page == (void *)addr)) {
84720- local_dec(&bpage->entries);
84721+ local_dec_unchecked(&bpage->entries);
84722 return;
84723 }
84724
84725@@ -2854,7 +2854,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
84726 start = bpage;
84727 do {
84728 if (bpage->page == (void *)addr) {
84729- local_dec(&bpage->entries);
84730+ local_dec_unchecked(&bpage->entries);
84731 return;
84732 }
84733 rb_inc_page(cpu_buffer, &bpage);
84734@@ -3138,7 +3138,7 @@ static inline unsigned long
84735 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
84736 {
84737 return local_read(&cpu_buffer->entries) -
84738- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
84739+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
84740 }
84741
84742 /**
84743@@ -3227,7 +3227,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
84744 return 0;
84745
84746 cpu_buffer = buffer->buffers[cpu];
84747- ret = local_read(&cpu_buffer->overrun);
84748+ ret = local_read_unchecked(&cpu_buffer->overrun);
84749
84750 return ret;
84751 }
84752@@ -3250,7 +3250,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
84753 return 0;
84754
84755 cpu_buffer = buffer->buffers[cpu];
84756- ret = local_read(&cpu_buffer->commit_overrun);
84757+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
84758
84759 return ret;
84760 }
84761@@ -3335,7 +3335,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
84762 /* if you care about this being correct, lock the buffer */
84763 for_each_buffer_cpu(buffer, cpu) {
84764 cpu_buffer = buffer->buffers[cpu];
84765- overruns += local_read(&cpu_buffer->overrun);
84766+ overruns += local_read_unchecked(&cpu_buffer->overrun);
84767 }
84768
84769 return overruns;
84770@@ -3511,8 +3511,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
84771 /*
84772 * Reset the reader page to size zero.
84773 */
84774- local_set(&cpu_buffer->reader_page->write, 0);
84775- local_set(&cpu_buffer->reader_page->entries, 0);
84776+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
84777+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
84778 local_set(&cpu_buffer->reader_page->page->commit, 0);
84779 cpu_buffer->reader_page->real_end = 0;
84780
84781@@ -3546,7 +3546,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
84782 * want to compare with the last_overrun.
84783 */
84784 smp_mb();
84785- overwrite = local_read(&(cpu_buffer->overrun));
84786+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
84787
84788 /*
84789 * Here's the tricky part.
84790@@ -4116,8 +4116,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
84791
84792 cpu_buffer->head_page
84793 = list_entry(cpu_buffer->pages, struct buffer_page, list);
84794- local_set(&cpu_buffer->head_page->write, 0);
84795- local_set(&cpu_buffer->head_page->entries, 0);
84796+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
84797+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
84798 local_set(&cpu_buffer->head_page->page->commit, 0);
84799
84800 cpu_buffer->head_page->read = 0;
84801@@ -4127,14 +4127,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
84802
84803 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
84804 INIT_LIST_HEAD(&cpu_buffer->new_pages);
84805- local_set(&cpu_buffer->reader_page->write, 0);
84806- local_set(&cpu_buffer->reader_page->entries, 0);
84807+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
84808+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
84809 local_set(&cpu_buffer->reader_page->page->commit, 0);
84810 cpu_buffer->reader_page->read = 0;
84811
84812 local_set(&cpu_buffer->entries_bytes, 0);
84813- local_set(&cpu_buffer->overrun, 0);
84814- local_set(&cpu_buffer->commit_overrun, 0);
84815+ local_set_unchecked(&cpu_buffer->overrun, 0);
84816+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
84817 local_set(&cpu_buffer->dropped_events, 0);
84818 local_set(&cpu_buffer->entries, 0);
84819 local_set(&cpu_buffer->committing, 0);
84820@@ -4538,8 +4538,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
84821 rb_init_page(bpage);
84822 bpage = reader->page;
84823 reader->page = *data_page;
84824- local_set(&reader->write, 0);
84825- local_set(&reader->entries, 0);
84826+ local_set_unchecked(&reader->write, 0);
84827+ local_set_unchecked(&reader->entries, 0);
84828 reader->read = 0;
84829 *data_page = bpage;
84830
84831diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
e2b79cd1 84832index 0582a01..310bed1 100644
bb5f0bf8
AF
84833--- a/kernel/trace/trace.c
84834+++ b/kernel/trace/trace.c
e2b79cd1 84835@@ -3327,7 +3327,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
bb5f0bf8
AF
84836 return 0;
84837 }
84838
84839-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
84840+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
84841 {
84842 /* do nothing if flag is already set */
84843 if (!!(trace_flags & mask) == !!enabled)
84844diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
84845index 51b4448..7be601f 100644
84846--- a/kernel/trace/trace.h
84847+++ b/kernel/trace/trace.h
84848@@ -1035,7 +1035,7 @@ extern const char *__stop___trace_bprintk_fmt[];
84849 void trace_printk_init_buffers(void);
84850 void trace_printk_start_comm(void);
84851 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
84852-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
84853+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
84854
84855 /*
84856 * Normal trace_printk() and friends allocates special buffers
84857diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
e2b79cd1 84858index 3d18aad..d1be0eb 100644
bb5f0bf8
AF
84859--- a/kernel/trace/trace_events.c
84860+++ b/kernel/trace/trace_events.c
e2b79cd1 84861@@ -1794,10 +1794,6 @@ static LIST_HEAD(ftrace_module_file_list);
bb5f0bf8
AF
84862 struct ftrace_module_file_ops {
84863 struct list_head list;
84864 struct module *mod;
84865- struct file_operations id;
84866- struct file_operations enable;
84867- struct file_operations format;
84868- struct file_operations filter;
84869 };
84870
84871 static struct ftrace_module_file_ops *
e2b79cd1 84872@@ -1838,17 +1834,12 @@ trace_create_file_ops(struct module *mod)
bb5f0bf8
AF
84873
84874 file_ops->mod = mod;
84875
84876- file_ops->id = ftrace_event_id_fops;
84877- file_ops->id.owner = mod;
84878-
84879- file_ops->enable = ftrace_enable_fops;
84880- file_ops->enable.owner = mod;
84881-
84882- file_ops->filter = ftrace_event_filter_fops;
84883- file_ops->filter.owner = mod;
84884-
84885- file_ops->format = ftrace_event_format_fops;
84886- file_ops->format.owner = mod;
84887+ pax_open_kernel();
84888+ mod->trace_id.owner = mod;
84889+ mod->trace_enable.owner = mod;
84890+ mod->trace_filter.owner = mod;
84891+ mod->trace_format.owner = mod;
84892+ pax_close_kernel();
84893
84894 list_add(&file_ops->list, &ftrace_module_file_list);
84895
e2b79cd1 84896@@ -1941,8 +1932,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
bb5f0bf8
AF
84897 struct ftrace_module_file_ops *file_ops)
84898 {
84899 return __trace_add_new_event(call, tr,
84900- &file_ops->id, &file_ops->enable,
84901- &file_ops->filter, &file_ops->format);
84902+ &file_ops->mod->trace_id, &file_ops->mod->trace_enable,
84903+ &file_ops->mod->trace_filter, &file_ops->mod->trace_format);
84904 }
84905
84906 #else
84907diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
84908index a5e8f48..a9690d2 100644
84909--- a/kernel/trace/trace_mmiotrace.c
84910+++ b/kernel/trace/trace_mmiotrace.c
84911@@ -24,7 +24,7 @@ struct header_iter {
84912 static struct trace_array *mmio_trace_array;
84913 static bool overrun_detected;
84914 static unsigned long prev_overruns;
84915-static atomic_t dropped_count;
84916+static atomic_unchecked_t dropped_count;
84917
84918 static void mmio_reset_data(struct trace_array *tr)
84919 {
84920@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
84921
84922 static unsigned long count_overruns(struct trace_iterator *iter)
84923 {
84924- unsigned long cnt = atomic_xchg(&dropped_count, 0);
84925+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
84926 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
84927
84928 if (over > prev_overruns)
84929@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
84930 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
84931 sizeof(*entry), 0, pc);
84932 if (!event) {
84933- atomic_inc(&dropped_count);
84934+ atomic_inc_unchecked(&dropped_count);
84935 return;
84936 }
84937 entry = ring_buffer_event_data(event);
84938@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
84939 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
84940 sizeof(*entry), 0, pc);
84941 if (!event) {
84942- atomic_inc(&dropped_count);
84943+ atomic_inc_unchecked(&dropped_count);
84944 return;
84945 }
84946 entry = ring_buffer_event_data(event);
84947diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
84948index bb922d9..2a54a257 100644
84949--- a/kernel/trace/trace_output.c
84950+++ b/kernel/trace/trace_output.c
84951@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
84952
84953 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
84954 if (!IS_ERR(p)) {
84955- p = mangle_path(s->buffer + s->len, p, "\n");
84956+ p = mangle_path(s->buffer + s->len, p, "\n\\");
84957 if (p) {
84958 s->len = p - s->buffer;
84959 return 1;
84960@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
84961 goto out;
84962 }
84963
84964+ pax_open_kernel();
84965 if (event->funcs->trace == NULL)
84966- event->funcs->trace = trace_nop_print;
84967+ *(void **)&event->funcs->trace = trace_nop_print;
84968 if (event->funcs->raw == NULL)
84969- event->funcs->raw = trace_nop_print;
84970+ *(void **)&event->funcs->raw = trace_nop_print;
84971 if (event->funcs->hex == NULL)
84972- event->funcs->hex = trace_nop_print;
84973+ *(void **)&event->funcs->hex = trace_nop_print;
84974 if (event->funcs->binary == NULL)
84975- event->funcs->binary = trace_nop_print;
84976+ *(void **)&event->funcs->binary = trace_nop_print;
84977+ pax_close_kernel();
84978
84979 key = event->type & (EVENT_HASHSIZE - 1);
84980
84981diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
84982index b20428c..4845a10 100644
84983--- a/kernel/trace/trace_stack.c
84984+++ b/kernel/trace/trace_stack.c
84985@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
84986 return;
84987
84988 /* we do not handle interrupt stacks yet */
84989- if (!object_is_on_stack(stack))
84990+ if (!object_starts_on_stack(stack))
84991 return;
84992
84993 local_irq_save(flags);
84994diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
84995index 9064b91..1f5d2f8 100644
84996--- a/kernel/user_namespace.c
84997+++ b/kernel/user_namespace.c
84998@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
84999 !kgid_has_mapping(parent_ns, group))
85000 return -EPERM;
85001
85002+#ifdef CONFIG_GRKERNSEC
85003+ /*
85004+ * This doesn't really inspire confidence:
85005+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
85006+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
85007+ * Increases kernel attack surface in areas developers
85008+ * previously cared little about ("low importance due
85009+ * to requiring "root" capability")
85010+ * To be removed when this code receives *proper* review
85011+ */
85012+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
85013+ !capable(CAP_SETGID))
85014+ return -EPERM;
85015+#endif
85016+
85017 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
85018 if (!ns)
85019 return -ENOMEM;
85020@@ -862,7 +877,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
85021 if (atomic_read(&current->mm->mm_users) > 1)
85022 return -EINVAL;
85023
85024- if (current->fs->users != 1)
85025+ if (atomic_read(&current->fs->users) != 1)
85026 return -EINVAL;
85027
85028 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
85029diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
85030index 4f69f9a..7c6f8f8 100644
85031--- a/kernel/utsname_sysctl.c
85032+++ b/kernel/utsname_sysctl.c
85033@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
85034 static int proc_do_uts_string(ctl_table *table, int write,
85035 void __user *buffer, size_t *lenp, loff_t *ppos)
85036 {
85037- struct ctl_table uts_table;
85038+ ctl_table_no_const uts_table;
85039 int r;
85040 memcpy(&uts_table, table, sizeof(uts_table));
85041 uts_table.data = get_uts(table, write);
85042diff --git a/kernel/watchdog.c b/kernel/watchdog.c
85043index 05039e3..17490c7 100644
85044--- a/kernel/watchdog.c
85045+++ b/kernel/watchdog.c
85046@@ -531,7 +531,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
85047 }
85048 #endif /* CONFIG_SYSCTL */
85049
85050-static struct smp_hotplug_thread watchdog_threads = {
85051+static struct smp_hotplug_thread watchdog_threads __read_only = {
85052 .store = &softlockup_watchdog,
85053 .thread_should_run = watchdog_should_run,
85054 .thread_fn = watchdog,
85055diff --git a/kernel/workqueue.c b/kernel/workqueue.c
85056index 6f01921..139869b 100644
85057--- a/kernel/workqueue.c
85058+++ b/kernel/workqueue.c
85059@@ -4596,7 +4596,7 @@ static void rebind_workers(struct worker_pool *pool)
85060 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
85061 worker_flags |= WORKER_REBOUND;
85062 worker_flags &= ~WORKER_UNBOUND;
85063- ACCESS_ONCE(worker->flags) = worker_flags;
85064+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
85065 }
85066
85067 spin_unlock_irq(&pool->lock);
85068diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
85069index 74fdc5c..3310593 100644
85070--- a/lib/Kconfig.debug
85071+++ b/lib/Kconfig.debug
85072@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
85073
85074 config DEBUG_LOCK_ALLOC
85075 bool "Lock debugging: detect incorrect freeing of live locks"
85076- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85077+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85078 select DEBUG_SPINLOCK
85079 select DEBUG_MUTEXES
85080 select LOCKDEP
85081@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
85082
85083 config PROVE_LOCKING
85084 bool "Lock debugging: prove locking correctness"
85085- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85086+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85087 select LOCKDEP
85088 select DEBUG_SPINLOCK
85089 select DEBUG_MUTEXES
85090@@ -614,7 +614,7 @@ config LOCKDEP
85091
85092 config LOCK_STAT
85093 bool "Lock usage statistics"
85094- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85095+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85096 select LOCKDEP
85097 select DEBUG_SPINLOCK
85098 select DEBUG_MUTEXES
85099@@ -1282,6 +1282,7 @@ config LATENCYTOP
85100 depends on DEBUG_KERNEL
85101 depends on STACKTRACE_SUPPORT
85102 depends on PROC_FS
85103+ depends on !GRKERNSEC_HIDESYM
85104 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
85105 select KALLSYMS
85106 select KALLSYMS_ALL
85107@@ -1298,7 +1299,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
85108 config DEBUG_STRICT_USER_COPY_CHECKS
85109 bool "Strict user copy size checks"
85110 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
85111- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
85112+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
85113 help
85114 Enabling this option turns a certain set of sanity checks for user
85115 copy operations into compile time failures.
85116@@ -1328,7 +1329,7 @@ config INTERVAL_TREE_TEST
85117
85118 config PROVIDE_OHCI1394_DMA_INIT
85119 bool "Remote debugging over FireWire early on boot"
85120- depends on PCI && X86
85121+ depends on PCI && X86 && !GRKERNSEC
85122 help
85123 If you want to debug problems which hang or crash the kernel early
85124 on boot and the crashing machine has a FireWire port, you can use
85125@@ -1357,7 +1358,7 @@ config PROVIDE_OHCI1394_DMA_INIT
85126
85127 config FIREWIRE_OHCI_REMOTE_DMA
85128 bool "Remote debugging over FireWire with firewire-ohci"
85129- depends on FIREWIRE_OHCI
85130+ depends on FIREWIRE_OHCI && !GRKERNSEC
85131 help
85132 This option lets you use the FireWire bus for remote debugging
85133 with help of the firewire-ohci driver. It enables unfiltered
85134diff --git a/lib/Makefile b/lib/Makefile
85135index c55a037..fb46e3b 100644
85136--- a/lib/Makefile
85137+++ b/lib/Makefile
85138@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
85139
85140 obj-$(CONFIG_BTREE) += btree.o
85141 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
85142-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
85143+obj-y += list_debug.o
85144 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
85145
85146 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
85147diff --git a/lib/bitmap.c b/lib/bitmap.c
85148index 06f7e4f..f3cf2b0 100644
85149--- a/lib/bitmap.c
85150+++ b/lib/bitmap.c
85151@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
85152 {
85153 int c, old_c, totaldigits, ndigits, nchunks, nbits;
85154 u32 chunk;
85155- const char __user __force *ubuf = (const char __user __force *)buf;
85156+ const char __user *ubuf = (const char __force_user *)buf;
85157
85158 bitmap_zero(maskp, nmaskbits);
85159
85160@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
85161 {
85162 if (!access_ok(VERIFY_READ, ubuf, ulen))
85163 return -EFAULT;
85164- return __bitmap_parse((const char __force *)ubuf,
85165+ return __bitmap_parse((const char __force_kernel *)ubuf,
85166 ulen, 1, maskp, nmaskbits);
85167
85168 }
85169@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
85170 {
85171 unsigned a, b;
85172 int c, old_c, totaldigits;
85173- const char __user __force *ubuf = (const char __user __force *)buf;
85174+ const char __user *ubuf = (const char __force_user *)buf;
85175 int exp_digit, in_range;
85176
85177 totaldigits = c = 0;
85178@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
85179 {
85180 if (!access_ok(VERIFY_READ, ubuf, ulen))
85181 return -EFAULT;
85182- return __bitmap_parselist((const char __force *)ubuf,
85183+ return __bitmap_parselist((const char __force_kernel *)ubuf,
85184 ulen, 1, maskp, nmaskbits);
85185 }
85186 EXPORT_SYMBOL(bitmap_parselist_user);
85187diff --git a/lib/bug.c b/lib/bug.c
85188index 1686034..a9c00c8 100644
85189--- a/lib/bug.c
85190+++ b/lib/bug.c
85191@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
85192 return BUG_TRAP_TYPE_NONE;
85193
85194 bug = find_bug(bugaddr);
85195+ if (!bug)
85196+ return BUG_TRAP_TYPE_NONE;
85197
85198 file = NULL;
85199 line = 0;
85200diff --git a/lib/debugobjects.c b/lib/debugobjects.c
85201index 37061ed..da83f48 100644
85202--- a/lib/debugobjects.c
85203+++ b/lib/debugobjects.c
85204@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
85205 if (limit > 4)
85206 return;
85207
85208- is_on_stack = object_is_on_stack(addr);
85209+ is_on_stack = object_starts_on_stack(addr);
85210 if (is_on_stack == onstack)
85211 return;
85212
85213diff --git a/lib/devres.c b/lib/devres.c
85214index 8235331..5881053 100644
85215--- a/lib/devres.c
85216+++ b/lib/devres.c
85217@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
85218 void devm_iounmap(struct device *dev, void __iomem *addr)
85219 {
85220 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
85221- (void *)addr));
85222+ (void __force *)addr));
85223 iounmap(addr);
85224 }
85225 EXPORT_SYMBOL(devm_iounmap);
85226@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
85227 {
85228 ioport_unmap(addr);
85229 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
85230- devm_ioport_map_match, (void *)addr));
85231+ devm_ioport_map_match, (void __force *)addr));
85232 }
85233 EXPORT_SYMBOL(devm_ioport_unmap);
85234 #endif /* CONFIG_HAS_IOPORT */
85235diff --git a/lib/div64.c b/lib/div64.c
85236index a163b6c..9618fa5 100644
85237--- a/lib/div64.c
85238+++ b/lib/div64.c
85239@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
85240 EXPORT_SYMBOL(__div64_32);
85241
85242 #ifndef div_s64_rem
85243-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
85244+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
85245 {
85246 u64 quotient;
85247
85248@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
85249 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
85250 */
85251 #ifndef div64_u64
85252-u64 div64_u64(u64 dividend, u64 divisor)
85253+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
85254 {
85255 u32 high = divisor >> 32;
85256 u64 quot;
85257diff --git a/lib/dma-debug.c b/lib/dma-debug.c
85258index d87a17a..ac0d79a 100644
85259--- a/lib/dma-debug.c
85260+++ b/lib/dma-debug.c
85261@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
85262
85263 void dma_debug_add_bus(struct bus_type *bus)
85264 {
85265- struct notifier_block *nb;
85266+ notifier_block_no_const *nb;
85267
85268 if (global_disable)
85269 return;
85270@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
85271
85272 static void check_for_stack(struct device *dev, void *addr)
85273 {
85274- if (object_is_on_stack(addr))
85275+ if (object_starts_on_stack(addr))
85276 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
85277 "stack [addr=%p]\n", addr);
85278 }
85279diff --git a/lib/inflate.c b/lib/inflate.c
85280index 013a761..c28f3fc 100644
85281--- a/lib/inflate.c
85282+++ b/lib/inflate.c
85283@@ -269,7 +269,7 @@ static void free(void *where)
85284 malloc_ptr = free_mem_ptr;
85285 }
85286 #else
85287-#define malloc(a) kmalloc(a, GFP_KERNEL)
85288+#define malloc(a) kmalloc((a), GFP_KERNEL)
85289 #define free(a) kfree(a)
85290 #endif
85291
85292diff --git a/lib/ioremap.c b/lib/ioremap.c
85293index 0c9216c..863bd89 100644
85294--- a/lib/ioremap.c
85295+++ b/lib/ioremap.c
85296@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
85297 unsigned long next;
85298
85299 phys_addr -= addr;
85300- pmd = pmd_alloc(&init_mm, pud, addr);
85301+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
85302 if (!pmd)
85303 return -ENOMEM;
85304 do {
85305@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
85306 unsigned long next;
85307
85308 phys_addr -= addr;
85309- pud = pud_alloc(&init_mm, pgd, addr);
85310+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
85311 if (!pud)
85312 return -ENOMEM;
85313 do {
85314diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
85315index bd2bea9..6b3c95e 100644
85316--- a/lib/is_single_threaded.c
85317+++ b/lib/is_single_threaded.c
85318@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
85319 struct task_struct *p, *t;
85320 bool ret;
85321
85322+ if (!mm)
85323+ return true;
85324+
85325 if (atomic_read(&task->signal->live) != 1)
85326 return false;
85327
85328diff --git a/lib/kobject.c b/lib/kobject.c
85329index b7e29a6..2f3ca75 100644
85330--- a/lib/kobject.c
85331+++ b/lib/kobject.c
85332@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
85333 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
85334 if (!kset)
85335 return NULL;
85336- retval = kobject_set_name(&kset->kobj, name);
85337+ retval = kobject_set_name(&kset->kobj, "%s", name);
85338 if (retval) {
85339 kfree(kset);
85340 return NULL;
85341@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
85342
85343
85344 static DEFINE_SPINLOCK(kobj_ns_type_lock);
85345-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
85346+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
85347
85348-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
85349+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
85350 {
85351 enum kobj_ns_type type = ops->type;
85352 int error;
85353diff --git a/lib/list_debug.c b/lib/list_debug.c
85354index c24c2f7..06e070b 100644
85355--- a/lib/list_debug.c
85356+++ b/lib/list_debug.c
85357@@ -11,7 +11,9 @@
85358 #include <linux/bug.h>
85359 #include <linux/kernel.h>
85360 #include <linux/rculist.h>
85361+#include <linux/mm.h>
85362
85363+#ifdef CONFIG_DEBUG_LIST
85364 /*
85365 * Insert a new entry between two known consecutive entries.
85366 *
85367@@ -19,21 +21,32 @@
85368 * the prev/next entries already!
85369 */
85370
85371-void __list_add(struct list_head *new,
85372- struct list_head *prev,
85373- struct list_head *next)
85374+static bool __list_add_debug(struct list_head *new,
85375+ struct list_head *prev,
85376+ struct list_head *next)
85377 {
85378- WARN(next->prev != prev,
85379+ if (WARN(next->prev != prev,
85380 "list_add corruption. next->prev should be "
85381 "prev (%p), but was %p. (next=%p).\n",
85382- prev, next->prev, next);
85383- WARN(prev->next != next,
85384+ prev, next->prev, next) ||
85385+ WARN(prev->next != next,
85386 "list_add corruption. prev->next should be "
85387 "next (%p), but was %p. (prev=%p).\n",
85388- next, prev->next, prev);
85389- WARN(new == prev || new == next,
85390- "list_add double add: new=%p, prev=%p, next=%p.\n",
85391- new, prev, next);
85392+ next, prev->next, prev) ||
85393+ WARN(new == prev || new == next,
85394+ "list_add double add: new=%p, prev=%p, next=%p.\n",
85395+ new, prev, next))
85396+ return false;
85397+ return true;
85398+}
85399+
85400+void __list_add(struct list_head *new,
85401+ struct list_head *prev,
85402+ struct list_head *next)
85403+{
85404+ if (!__list_add_debug(new, prev, next))
85405+ return;
85406+
85407 next->prev = new;
85408 new->next = next;
85409 new->prev = prev;
85410@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
85411 }
85412 EXPORT_SYMBOL(__list_add);
85413
85414-void __list_del_entry(struct list_head *entry)
85415+static bool __list_del_entry_debug(struct list_head *entry)
85416 {
85417 struct list_head *prev, *next;
85418
85419@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
85420 WARN(next->prev != entry,
85421 "list_del corruption. next->prev should be %p, "
85422 "but was %p\n", entry, next->prev))
85423+ return false;
85424+ return true;
85425+}
85426+
85427+void __list_del_entry(struct list_head *entry)
85428+{
85429+ if (!__list_del_entry_debug(entry))
85430 return;
85431
85432- __list_del(prev, next);
85433+ __list_del(entry->prev, entry->next);
85434 }
85435 EXPORT_SYMBOL(__list_del_entry);
85436
85437@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
85438 void __list_add_rcu(struct list_head *new,
85439 struct list_head *prev, struct list_head *next)
85440 {
85441- WARN(next->prev != prev,
85442- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
85443- prev, next->prev, next);
85444- WARN(prev->next != next,
85445- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
85446- next, prev->next, prev);
85447+ if (!__list_add_debug(new, prev, next))
85448+ return;
85449+
85450 new->next = next;
85451 new->prev = prev;
85452 rcu_assign_pointer(list_next_rcu(prev), new);
85453 next->prev = new;
85454 }
85455 EXPORT_SYMBOL(__list_add_rcu);
85456+#endif
85457+
85458+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
85459+{
85460+#ifdef CONFIG_DEBUG_LIST
85461+ if (!__list_add_debug(new, prev, next))
85462+ return;
85463+#endif
85464+
85465+ pax_open_kernel();
85466+ next->prev = new;
85467+ new->next = next;
85468+ new->prev = prev;
85469+ prev->next = new;
85470+ pax_close_kernel();
85471+}
85472+EXPORT_SYMBOL(__pax_list_add);
85473+
85474+void pax_list_del(struct list_head *entry)
85475+{
85476+#ifdef CONFIG_DEBUG_LIST
85477+ if (!__list_del_entry_debug(entry))
85478+ return;
85479+#endif
85480+
85481+ pax_open_kernel();
85482+ __list_del(entry->prev, entry->next);
85483+ entry->next = LIST_POISON1;
85484+ entry->prev = LIST_POISON2;
85485+ pax_close_kernel();
85486+}
85487+EXPORT_SYMBOL(pax_list_del);
85488+
85489+void pax_list_del_init(struct list_head *entry)
85490+{
85491+ pax_open_kernel();
85492+ __list_del(entry->prev, entry->next);
85493+ INIT_LIST_HEAD(entry);
85494+ pax_close_kernel();
85495+}
85496+EXPORT_SYMBOL(pax_list_del_init);
85497+
85498+void __pax_list_add_rcu(struct list_head *new,
85499+ struct list_head *prev, struct list_head *next)
85500+{
85501+#ifdef CONFIG_DEBUG_LIST
85502+ if (!__list_add_debug(new, prev, next))
85503+ return;
85504+#endif
85505+
85506+ pax_open_kernel();
85507+ new->next = next;
85508+ new->prev = prev;
85509+ rcu_assign_pointer(list_next_rcu(prev), new);
85510+ next->prev = new;
85511+ pax_close_kernel();
85512+}
85513+EXPORT_SYMBOL(__pax_list_add_rcu);
85514+
85515+void pax_list_del_rcu(struct list_head *entry)
85516+{
85517+#ifdef CONFIG_DEBUG_LIST
85518+ if (!__list_del_entry_debug(entry))
85519+ return;
85520+#endif
85521+
85522+ pax_open_kernel();
85523+ __list_del(entry->prev, entry->next);
85524+ entry->next = LIST_POISON1;
85525+ entry->prev = LIST_POISON2;
85526+ pax_close_kernel();
85527+}
85528+EXPORT_SYMBOL(pax_list_del_rcu);
85529diff --git a/lib/radix-tree.c b/lib/radix-tree.c
85530index e796429..6e38f9f 100644
85531--- a/lib/radix-tree.c
85532+++ b/lib/radix-tree.c
85533@@ -92,7 +92,7 @@ struct radix_tree_preload {
85534 int nr;
85535 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
85536 };
85537-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
85538+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
85539
85540 static inline void *ptr_to_indirect(void *ptr)
85541 {
85542diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
85543index bb2b201..46abaf9 100644
85544--- a/lib/strncpy_from_user.c
85545+++ b/lib/strncpy_from_user.c
85546@@ -21,7 +21,7 @@
85547 */
85548 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
85549 {
85550- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85551+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85552 long res = 0;
85553
85554 /*
85555diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
85556index a28df52..3d55877 100644
85557--- a/lib/strnlen_user.c
85558+++ b/lib/strnlen_user.c
85559@@ -26,7 +26,7 @@
85560 */
85561 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
85562 {
85563- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85564+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85565 long align, res = 0;
85566 unsigned long c;
85567
85568diff --git a/lib/swiotlb.c b/lib/swiotlb.c
85569index d23762e..e21eab2 100644
85570--- a/lib/swiotlb.c
85571+++ b/lib/swiotlb.c
85572@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
85573
85574 void
85575 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
85576- dma_addr_t dev_addr)
85577+ dma_addr_t dev_addr, struct dma_attrs *attrs)
85578 {
85579 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
85580
85581diff --git a/lib/usercopy.c b/lib/usercopy.c
85582index 4f5b1dd..7cab418 100644
85583--- a/lib/usercopy.c
85584+++ b/lib/usercopy.c
85585@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
85586 WARN(1, "Buffer overflow detected!\n");
85587 }
85588 EXPORT_SYMBOL(copy_from_user_overflow);
85589+
85590+void copy_to_user_overflow(void)
85591+{
85592+ WARN(1, "Buffer overflow detected!\n");
85593+}
85594+EXPORT_SYMBOL(copy_to_user_overflow);
85595diff --git a/lib/vsprintf.c b/lib/vsprintf.c
85596index e149c64..24aa71a 100644
85597--- a/lib/vsprintf.c
85598+++ b/lib/vsprintf.c
85599@@ -16,6 +16,9 @@
85600 * - scnprintf and vscnprintf
85601 */
85602
85603+#ifdef CONFIG_GRKERNSEC_HIDESYM
85604+#define __INCLUDED_BY_HIDESYM 1
85605+#endif
85606 #include <stdarg.h>
85607 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
85608 #include <linux/types.h>
85609@@ -981,7 +984,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
85610 return number(buf, end, *(const netdev_features_t *)addr, spec);
85611 }
85612
85613+#ifdef CONFIG_GRKERNSEC_HIDESYM
85614+int kptr_restrict __read_mostly = 2;
85615+#else
85616 int kptr_restrict __read_mostly;
85617+#endif
85618
85619 /*
85620 * Show a '%p' thing. A kernel extension is that the '%p' is followed
85621@@ -994,6 +1001,7 @@ int kptr_restrict __read_mostly;
85622 * - 'f' For simple symbolic function names without offset
85623 * - 'S' For symbolic direct pointers with offset
85624 * - 's' For symbolic direct pointers without offset
85625+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
85626 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
85627 * - 'B' For backtraced symbolic direct pointers with offset
85628 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
85629@@ -1052,12 +1060,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85630
85631 if (!ptr && *fmt != 'K') {
85632 /*
85633- * Print (null) with the same width as a pointer so it makes
85634+ * Print (nil) with the same width as a pointer so it makes
85635 * tabular output look nice.
85636 */
85637 if (spec.field_width == -1)
85638 spec.field_width = default_width;
85639- return string(buf, end, "(null)", spec);
85640+ return string(buf, end, "(nil)", spec);
85641 }
85642
85643 switch (*fmt) {
85644@@ -1067,6 +1075,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85645 /* Fallthrough */
85646 case 'S':
85647 case 's':
85648+#ifdef CONFIG_GRKERNSEC_HIDESYM
85649+ break;
85650+#else
85651+ return symbol_string(buf, end, ptr, spec, fmt);
85652+#endif
85653+ case 'A':
85654 case 'B':
85655 return symbol_string(buf, end, ptr, spec, fmt);
85656 case 'R':
85657@@ -1107,6 +1121,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85658 va_end(va);
85659 return buf;
85660 }
85661+ case 'P':
85662+ break;
85663 case 'K':
85664 /*
85665 * %pK cannot be used in IRQ context because its test
85666@@ -1136,6 +1152,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85667 return number(buf, end,
85668 (unsigned long long) *((phys_addr_t *)ptr), spec);
85669 }
85670+
85671+#ifdef CONFIG_GRKERNSEC_HIDESYM
85672+ /* 'P' = approved pointers to copy to userland,
85673+ as in the /proc/kallsyms case, as we make it display nothing
85674+ for non-root users, and the real contents for root users
85675+ Also ignore 'K' pointers, since we force their NULLing for non-root users
85676+ above
85677+ */
85678+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
85679+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
85680+ dump_stack();
85681+ ptr = NULL;
85682+ }
85683+#endif
85684+
85685 spec.flags |= SMALL;
85686 if (spec.field_width == -1) {
85687 spec.field_width = default_width;
85688@@ -1857,11 +1888,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
85689 typeof(type) value; \
85690 if (sizeof(type) == 8) { \
85691 args = PTR_ALIGN(args, sizeof(u32)); \
85692- *(u32 *)&value = *(u32 *)args; \
85693- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
85694+ *(u32 *)&value = *(const u32 *)args; \
85695+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
85696 } else { \
85697 args = PTR_ALIGN(args, sizeof(type)); \
85698- value = *(typeof(type) *)args; \
85699+ value = *(const typeof(type) *)args; \
85700 } \
85701 args += sizeof(type); \
85702 value; \
85703@@ -1924,7 +1955,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
85704 case FORMAT_TYPE_STR: {
85705 const char *str_arg = args;
85706 args += strlen(str_arg) + 1;
85707- str = string(str, end, (char *)str_arg, spec);
85708+ str = string(str, end, str_arg, spec);
85709 break;
85710 }
85711
85712diff --git a/localversion-grsec b/localversion-grsec
85713new file mode 100644
85714index 0000000..7cd6065
85715--- /dev/null
85716+++ b/localversion-grsec
85717@@ -0,0 +1 @@
85718+-grsec
85719diff --git a/mm/Kconfig b/mm/Kconfig
85720index e742d06..c56fdd8 100644
85721--- a/mm/Kconfig
85722+++ b/mm/Kconfig
85723@@ -317,10 +317,10 @@ config KSM
85724 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
85725
85726 config DEFAULT_MMAP_MIN_ADDR
85727- int "Low address space to protect from user allocation"
85728+ int "Low address space to protect from user allocation"
85729 depends on MMU
85730- default 4096
85731- help
85732+ default 65536
85733+ help
85734 This is the portion of low virtual memory which should be protected
85735 from userspace allocation. Keeping a user from writing to low pages
85736 can help reduce the impact of kernel NULL pointer bugs.
85737@@ -351,7 +351,7 @@ config MEMORY_FAILURE
85738
85739 config HWPOISON_INJECT
85740 tristate "HWPoison pages injector"
85741- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
85742+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
85743 select PROC_PAGE_MONITOR
85744
85745 config NOMMU_INITIAL_TRIM_EXCESS
85746diff --git a/mm/backing-dev.c b/mm/backing-dev.c
85747index 5025174..9d67dcd 100644
85748--- a/mm/backing-dev.c
85749+++ b/mm/backing-dev.c
85750@@ -12,7 +12,7 @@
85751 #include <linux/device.h>
85752 #include <trace/events/writeback.h>
85753
85754-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
85755+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
85756
85757 struct backing_dev_info default_backing_dev_info = {
85758 .name = "default",
85759@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
85760 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
85761 unsigned int cap)
85762 {
85763- char tmp[32];
85764 int err;
85765
85766 bdi->name = name;
85767@@ -524,8 +523,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
85768 if (err)
85769 return err;
85770
85771- sprintf(tmp, "%.28s%s", name, "-%d");
85772- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
85773+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
85774 if (err) {
85775 bdi_destroy(bdi);
85776 return err;
85777diff --git a/mm/filemap.c b/mm/filemap.c
e2b79cd1 85778index 7905fe7..f59502b 100644
bb5f0bf8
AF
85779--- a/mm/filemap.c
85780+++ b/mm/filemap.c
85781@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
85782 struct address_space *mapping = file->f_mapping;
85783
85784 if (!mapping->a_ops->readpage)
85785- return -ENOEXEC;
85786+ return -ENODEV;
85787 file_accessed(file);
85788 vma->vm_ops = &generic_file_vm_ops;
85789 return 0;
e2b79cd1
AF
85790@@ -1948,7 +1948,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
85791
85792 while (bytes) {
85793 char __user *buf = iov->iov_base + base;
85794- int copy = min(bytes, iov->iov_len - base);
85795+ size_t copy = min(bytes, iov->iov_len - base);
85796
85797 base = 0;
85798 left = __copy_from_user_inatomic(vaddr, buf, copy);
85799@@ -1977,7 +1977,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
85800 BUG_ON(!in_atomic());
85801 kaddr = kmap_atomic(page);
85802 if (likely(i->nr_segs == 1)) {
85803- int left;
85804+ size_t left;
85805 char __user *buf = i->iov->iov_base + i->iov_offset;
85806 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
85807 copied = bytes - left;
85808@@ -2005,7 +2005,7 @@ size_t iov_iter_copy_from_user(struct page *page,
85809
85810 kaddr = kmap(page);
85811 if (likely(i->nr_segs == 1)) {
85812- int left;
85813+ size_t left;
85814 char __user *buf = i->iov->iov_base + i->iov_offset;
85815 left = __copy_from_user(kaddr + offset, buf, bytes);
85816 copied = bytes - left;
85817@@ -2035,7 +2035,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
85818 * zero-length segments (without overruning the iovec).
85819 */
85820 while (bytes || unlikely(i->count && !iov->iov_len)) {
85821- int copy;
85822+ size_t copy;
85823
85824 copy = min(bytes, iov->iov_len - base);
85825 BUG_ON(!i->count || i->count < copy);
bb5f0bf8
AF
85826@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
85827 *pos = i_size_read(inode);
85828
85829 if (limit != RLIM_INFINITY) {
85830+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
85831 if (*pos >= limit) {
85832 send_sig(SIGXFSZ, current, 0);
85833 return -EFBIG;
85834diff --git a/mm/fremap.c b/mm/fremap.c
85835index 87da359..3f41cb1 100644
85836--- a/mm/fremap.c
85837+++ b/mm/fremap.c
85838@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
85839 retry:
85840 vma = find_vma(mm, start);
85841
85842+#ifdef CONFIG_PAX_SEGMEXEC
85843+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
85844+ goto out;
85845+#endif
85846+
85847 /*
85848 * Make sure the vma is shared, that it supports prefaulting,
85849 * and that the remapped range is valid and fully within
85850diff --git a/mm/highmem.c b/mm/highmem.c
85851index b32b70c..e512eb0 100644
85852--- a/mm/highmem.c
85853+++ b/mm/highmem.c
85854@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
85855 * So no dangers, even with speculative execution.
85856 */
85857 page = pte_page(pkmap_page_table[i]);
85858+ pax_open_kernel();
85859 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
85860-
85861+ pax_close_kernel();
85862 set_page_address(page, NULL);
85863 need_flush = 1;
85864 }
85865@@ -198,9 +199,11 @@ start:
85866 }
85867 }
85868 vaddr = PKMAP_ADDR(last_pkmap_nr);
85869+
85870+ pax_open_kernel();
85871 set_pte_at(&init_mm, vaddr,
85872 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
85873-
85874+ pax_close_kernel();
85875 pkmap_count[last_pkmap_nr] = 1;
85876 set_page_address(page, (void *)vaddr);
85877
85878diff --git a/mm/hugetlb.c b/mm/hugetlb.c
85879index 7c5eb85..5c01c2f 100644
85880--- a/mm/hugetlb.c
85881+++ b/mm/hugetlb.c
85882@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
85883 struct hstate *h = &default_hstate;
85884 unsigned long tmp;
85885 int ret;
85886+ ctl_table_no_const hugetlb_table;
85887
85888 tmp = h->max_huge_pages;
85889
85890 if (write && h->order >= MAX_ORDER)
85891 return -EINVAL;
85892
85893- table->data = &tmp;
85894- table->maxlen = sizeof(unsigned long);
85895- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
85896+ hugetlb_table = *table;
85897+ hugetlb_table.data = &tmp;
85898+ hugetlb_table.maxlen = sizeof(unsigned long);
85899+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
85900 if (ret)
85901 goto out;
85902
85903@@ -2087,15 +2089,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
85904 struct hstate *h = &default_hstate;
85905 unsigned long tmp;
85906 int ret;
85907+ ctl_table_no_const hugetlb_table;
85908
85909 tmp = h->nr_overcommit_huge_pages;
85910
85911 if (write && h->order >= MAX_ORDER)
85912 return -EINVAL;
85913
85914- table->data = &tmp;
85915- table->maxlen = sizeof(unsigned long);
85916- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
85917+ hugetlb_table = *table;
85918+ hugetlb_table.data = &tmp;
85919+ hugetlb_table.maxlen = sizeof(unsigned long);
85920+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
85921 if (ret)
85922 goto out;
85923
85924@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
85925 return 1;
85926 }
85927
85928+#ifdef CONFIG_PAX_SEGMEXEC
85929+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
85930+{
85931+ struct mm_struct *mm = vma->vm_mm;
85932+ struct vm_area_struct *vma_m;
85933+ unsigned long address_m;
85934+ pte_t *ptep_m;
85935+
85936+ vma_m = pax_find_mirror_vma(vma);
85937+ if (!vma_m)
85938+ return;
85939+
85940+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
85941+ address_m = address + SEGMEXEC_TASK_SIZE;
85942+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
85943+ get_page(page_m);
85944+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
85945+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
85946+}
85947+#endif
85948+
85949 /*
85950 * Hugetlb_cow() should be called with page lock of the original hugepage held.
85951 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
85952@@ -2663,6 +2688,11 @@ retry_avoidcopy:
85953 make_huge_pte(vma, new_page, 1));
85954 page_remove_rmap(old_page);
85955 hugepage_add_new_anon_rmap(new_page, vma, address);
85956+
85957+#ifdef CONFIG_PAX_SEGMEXEC
85958+ pax_mirror_huge_pte(vma, address, new_page);
85959+#endif
85960+
85961 /* Make the old page be freed below */
85962 new_page = old_page;
85963 }
85964@@ -2821,6 +2851,10 @@ retry:
85965 && (vma->vm_flags & VM_SHARED)));
85966 set_huge_pte_at(mm, address, ptep, new_pte);
85967
85968+#ifdef CONFIG_PAX_SEGMEXEC
85969+ pax_mirror_huge_pte(vma, address, page);
85970+#endif
85971+
85972 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
85973 /* Optimization, do the COW without a second fault */
85974 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
85975@@ -2850,6 +2884,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85976 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
85977 struct hstate *h = hstate_vma(vma);
85978
85979+#ifdef CONFIG_PAX_SEGMEXEC
85980+ struct vm_area_struct *vma_m;
85981+#endif
85982+
85983 address &= huge_page_mask(h);
85984
85985 ptep = huge_pte_offset(mm, address);
85986@@ -2863,6 +2901,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85987 VM_FAULT_SET_HINDEX(hstate_index(h));
85988 }
85989
85990+#ifdef CONFIG_PAX_SEGMEXEC
85991+ vma_m = pax_find_mirror_vma(vma);
85992+ if (vma_m) {
85993+ unsigned long address_m;
85994+
85995+ if (vma->vm_start > vma_m->vm_start) {
85996+ address_m = address;
85997+ address -= SEGMEXEC_TASK_SIZE;
85998+ vma = vma_m;
85999+ h = hstate_vma(vma);
86000+ } else
86001+ address_m = address + SEGMEXEC_TASK_SIZE;
86002+
86003+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
86004+ return VM_FAULT_OOM;
86005+ address_m &= HPAGE_MASK;
86006+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
86007+ }
86008+#endif
86009+
86010 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
86011 if (!ptep)
86012 return VM_FAULT_OOM;
86013diff --git a/mm/internal.h b/mm/internal.h
86014index 8562de0..92b2073 100644
86015--- a/mm/internal.h
86016+++ b/mm/internal.h
86017@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
86018 * in mm/page_alloc.c
86019 */
86020 extern void __free_pages_bootmem(struct page *page, unsigned int order);
86021+extern void free_compound_page(struct page *page);
86022 extern void prep_compound_page(struct page *page, unsigned long order);
86023 #ifdef CONFIG_MEMORY_FAILURE
86024 extern bool is_free_buddy_page(struct page *page);
86025@@ -355,7 +356,7 @@ extern u32 hwpoison_filter_enable;
86026
86027 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
86028 unsigned long, unsigned long,
86029- unsigned long, unsigned long);
86030+ unsigned long, unsigned long) __intentional_overflow(-1);
86031
86032 extern void set_pageblock_order(void);
86033 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
86034diff --git a/mm/kmemleak.c b/mm/kmemleak.c
86035index c8d7f31..2dbeffd 100644
86036--- a/mm/kmemleak.c
86037+++ b/mm/kmemleak.c
86038@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
86039
86040 for (i = 0; i < object->trace_len; i++) {
86041 void *ptr = (void *)object->trace[i];
86042- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
86043+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
86044 }
86045 }
86046
86047@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
86048 return -ENOMEM;
86049 }
86050
86051- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
86052+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
86053 &kmemleak_fops);
86054 if (!dentry)
86055 pr_warning("Failed to create the debugfs kmemleak file\n");
86056diff --git a/mm/maccess.c b/mm/maccess.c
86057index d53adf9..03a24bf 100644
86058--- a/mm/maccess.c
86059+++ b/mm/maccess.c
86060@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
86061 set_fs(KERNEL_DS);
86062 pagefault_disable();
86063 ret = __copy_from_user_inatomic(dst,
86064- (__force const void __user *)src, size);
86065+ (const void __force_user *)src, size);
86066 pagefault_enable();
86067 set_fs(old_fs);
86068
86069@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
86070
86071 set_fs(KERNEL_DS);
86072 pagefault_disable();
86073- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
86074+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
86075 pagefault_enable();
86076 set_fs(old_fs);
86077
86078diff --git a/mm/madvise.c b/mm/madvise.c
86079index 7055883..aafb1ed 100644
86080--- a/mm/madvise.c
86081+++ b/mm/madvise.c
86082@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
86083 pgoff_t pgoff;
86084 unsigned long new_flags = vma->vm_flags;
86085
86086+#ifdef CONFIG_PAX_SEGMEXEC
86087+ struct vm_area_struct *vma_m;
86088+#endif
86089+
86090 switch (behavior) {
86091 case MADV_NORMAL:
86092 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
86093@@ -126,6 +130,13 @@ success:
86094 /*
86095 * vm_flags is protected by the mmap_sem held in write mode.
86096 */
86097+
86098+#ifdef CONFIG_PAX_SEGMEXEC
86099+ vma_m = pax_find_mirror_vma(vma);
86100+ if (vma_m)
86101+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
86102+#endif
86103+
86104 vma->vm_flags = new_flags;
86105
86106 out:
86107@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
86108 struct vm_area_struct ** prev,
86109 unsigned long start, unsigned long end)
86110 {
86111+
86112+#ifdef CONFIG_PAX_SEGMEXEC
86113+ struct vm_area_struct *vma_m;
86114+#endif
86115+
86116 *prev = vma;
86117 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
86118 return -EINVAL;
86119@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
86120 zap_page_range(vma, start, end - start, &details);
86121 } else
86122 zap_page_range(vma, start, end - start, NULL);
86123+
86124+#ifdef CONFIG_PAX_SEGMEXEC
86125+ vma_m = pax_find_mirror_vma(vma);
86126+ if (vma_m) {
86127+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
86128+ struct zap_details details = {
86129+ .nonlinear_vma = vma_m,
86130+ .last_index = ULONG_MAX,
86131+ };
86132+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
86133+ } else
86134+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
86135+ }
86136+#endif
86137+
86138 return 0;
86139 }
86140
86141@@ -485,6 +516,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
86142 if (end < start)
86143 return error;
86144
86145+#ifdef CONFIG_PAX_SEGMEXEC
86146+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
86147+ if (end > SEGMEXEC_TASK_SIZE)
86148+ return error;
86149+ } else
86150+#endif
86151+
86152+ if (end > TASK_SIZE)
86153+ return error;
86154+
86155 error = 0;
86156 if (end == start)
86157 return error;
86158diff --git a/mm/memory-failure.c b/mm/memory-failure.c
86159index ceb0c7f..b2b8e94 100644
86160--- a/mm/memory-failure.c
86161+++ b/mm/memory-failure.c
86162@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
86163
86164 int sysctl_memory_failure_recovery __read_mostly = 1;
86165
86166-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
86167+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
86168
86169 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
86170
86171@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
86172 pfn, t->comm, t->pid);
86173 si.si_signo = SIGBUS;
86174 si.si_errno = 0;
86175- si.si_addr = (void *)addr;
86176+ si.si_addr = (void __user *)addr;
86177 #ifdef __ARCH_SI_TRAPNO
86178 si.si_trapno = trapno;
86179 #endif
86180@@ -760,7 +760,7 @@ static struct page_state {
86181 unsigned long res;
86182 char *msg;
86183 int (*action)(struct page *p, unsigned long pfn);
86184-} error_states[] = {
86185+} __do_const error_states[] = {
86186 { reserved, reserved, "reserved kernel", me_kernel },
86187 /*
86188 * free pages are specially detected outside this table:
86189@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86190 nr_pages = 1 << compound_order(hpage);
86191 else /* normal page or thp */
86192 nr_pages = 1;
86193- atomic_long_add(nr_pages, &num_poisoned_pages);
86194+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
86195
86196 /*
86197 * We need/can do nothing about count=0 pages.
86198@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86199 if (!PageHWPoison(hpage)
86200 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
86201 || (p != hpage && TestSetPageHWPoison(hpage))) {
86202- atomic_long_sub(nr_pages, &num_poisoned_pages);
86203+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86204 return 0;
86205 }
86206 set_page_hwpoison_huge_page(hpage);
86207@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86208 }
86209 if (hwpoison_filter(p)) {
86210 if (TestClearPageHWPoison(p))
86211- atomic_long_sub(nr_pages, &num_poisoned_pages);
86212+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86213 unlock_page(hpage);
86214 put_page(hpage);
86215 return 0;
86216@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
86217 return 0;
86218 }
86219 if (TestClearPageHWPoison(p))
86220- atomic_long_sub(nr_pages, &num_poisoned_pages);
86221+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86222 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
86223 return 0;
86224 }
86225@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
86226 */
86227 if (TestClearPageHWPoison(page)) {
86228 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
86229- atomic_long_sub(nr_pages, &num_poisoned_pages);
86230+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86231 freeit = 1;
86232 if (PageHuge(page))
86233 clear_page_hwpoison_huge_page(page);
86234@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
86235 } else {
86236 set_page_hwpoison_huge_page(hpage);
86237 dequeue_hwpoisoned_huge_page(hpage);
86238- atomic_long_add(1 << compound_trans_order(hpage),
86239+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
86240 &num_poisoned_pages);
86241 }
86242 /* keep elevated page count for bad page */
86243@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
86244 if (PageHuge(page)) {
86245 set_page_hwpoison_huge_page(hpage);
86246 dequeue_hwpoisoned_huge_page(hpage);
86247- atomic_long_add(1 << compound_trans_order(hpage),
86248+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
86249 &num_poisoned_pages);
86250 } else {
86251 SetPageHWPoison(page);
86252- atomic_long_inc(&num_poisoned_pages);
86253+ atomic_long_inc_unchecked(&num_poisoned_pages);
86254 }
86255 }
86256 /* keep elevated page count for bad page */
86257@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
86258 put_page(page);
86259 pr_info("soft_offline: %#lx: invalidated\n", pfn);
86260 SetPageHWPoison(page);
86261- atomic_long_inc(&num_poisoned_pages);
86262+ atomic_long_inc_unchecked(&num_poisoned_pages);
86263 return 0;
86264 }
86265
86266@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
86267 ret = -EIO;
86268 } else {
86269 SetPageHWPoison(page);
86270- atomic_long_inc(&num_poisoned_pages);
86271+ atomic_long_inc_unchecked(&num_poisoned_pages);
86272 }
86273 } else {
86274 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
86275diff --git a/mm/memory.c b/mm/memory.c
86276index 5a35443..7c0340f 100644
86277--- a/mm/memory.c
86278+++ b/mm/memory.c
86279@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
86280 free_pte_range(tlb, pmd, addr);
86281 } while (pmd++, addr = next, addr != end);
86282
86283+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
86284 start &= PUD_MASK;
86285 if (start < floor)
86286 return;
86287@@ -442,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
86288 pmd = pmd_offset(pud, start);
86289 pud_clear(pud);
86290 pmd_free_tlb(tlb, pmd, start);
86291+#endif
86292+
86293 }
86294
86295 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86296@@ -461,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86297 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
86298 } while (pud++, addr = next, addr != end);
86299
86300+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
86301 start &= PGDIR_MASK;
86302 if (start < floor)
86303 return;
86304@@ -475,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86305 pud = pud_offset(pgd, start);
86306 pgd_clear(pgd);
86307 pud_free_tlb(tlb, pud, start);
86308+#endif
86309+
86310 }
86311
86312 /*
86313@@ -1644,12 +1650,6 @@ no_page_table:
86314 return page;
86315 }
86316
86317-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
86318-{
86319- return stack_guard_page_start(vma, addr) ||
86320- stack_guard_page_end(vma, addr+PAGE_SIZE);
86321-}
86322-
86323 /**
86324 * __get_user_pages() - pin user pages in memory
86325 * @tsk: task_struct of target task
86326@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86327
86328 i = 0;
86329
86330- do {
86331+ while (nr_pages) {
86332 struct vm_area_struct *vma;
86333
86334- vma = find_extend_vma(mm, start);
86335+ vma = find_vma(mm, start);
86336 if (!vma && in_gate_area(mm, start)) {
86337 unsigned long pg = start & PAGE_MASK;
86338 pgd_t *pgd;
86339@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86340 goto next_page;
86341 }
86342
86343- if (!vma ||
86344+ if (!vma || start < vma->vm_start ||
86345 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
86346 !(vm_flags & vma->vm_flags))
86347 return i ? : -EFAULT;
86348@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86349 int ret;
86350 unsigned int fault_flags = 0;
86351
86352- /* For mlock, just skip the stack guard page. */
86353- if (foll_flags & FOLL_MLOCK) {
86354- if (stack_guard_page(vma, start))
86355- goto next_page;
86356- }
86357 if (foll_flags & FOLL_WRITE)
86358 fault_flags |= FAULT_FLAG_WRITE;
86359 if (nonblocking)
86360@@ -1901,7 +1896,7 @@ next_page:
86361 start += page_increm * PAGE_SIZE;
86362 nr_pages -= page_increm;
86363 } while (nr_pages && start < vma->vm_end);
86364- } while (nr_pages);
86365+ }
86366 return i;
86367 }
86368 EXPORT_SYMBOL(__get_user_pages);
86369@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
86370 page_add_file_rmap(page);
86371 set_pte_at(mm, addr, pte, mk_pte(page, prot));
86372
86373+#ifdef CONFIG_PAX_SEGMEXEC
86374+ pax_mirror_file_pte(vma, addr, page, ptl);
86375+#endif
86376+
86377 retval = 0;
86378 pte_unmap_unlock(pte, ptl);
86379 return retval;
86380@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
86381 if (!page_count(page))
86382 return -EINVAL;
86383 if (!(vma->vm_flags & VM_MIXEDMAP)) {
86384+
86385+#ifdef CONFIG_PAX_SEGMEXEC
86386+ struct vm_area_struct *vma_m;
86387+#endif
86388+
86389 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
86390 BUG_ON(vma->vm_flags & VM_PFNMAP);
86391 vma->vm_flags |= VM_MIXEDMAP;
86392+
86393+#ifdef CONFIG_PAX_SEGMEXEC
86394+ vma_m = pax_find_mirror_vma(vma);
86395+ if (vma_m)
86396+ vma_m->vm_flags |= VM_MIXEDMAP;
86397+#endif
86398+
86399 }
86400 return insert_page(vma, addr, page, vma->vm_page_prot);
86401 }
86402@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
86403 unsigned long pfn)
86404 {
86405 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
86406+ BUG_ON(vma->vm_mirror);
86407
86408 if (addr < vma->vm_start || addr >= vma->vm_end)
86409 return -EFAULT;
86410@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
86411
86412 BUG_ON(pud_huge(*pud));
86413
86414- pmd = pmd_alloc(mm, pud, addr);
86415+ pmd = (mm == &init_mm) ?
86416+ pmd_alloc_kernel(mm, pud, addr) :
86417+ pmd_alloc(mm, pud, addr);
86418 if (!pmd)
86419 return -ENOMEM;
86420 do {
86421@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
86422 unsigned long next;
86423 int err;
86424
86425- pud = pud_alloc(mm, pgd, addr);
86426+ pud = (mm == &init_mm) ?
86427+ pud_alloc_kernel(mm, pgd, addr) :
86428+ pud_alloc(mm, pgd, addr);
86429 if (!pud)
86430 return -ENOMEM;
86431 do {
86432@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
86433 copy_user_highpage(dst, src, va, vma);
86434 }
86435
86436+#ifdef CONFIG_PAX_SEGMEXEC
86437+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
86438+{
86439+ struct mm_struct *mm = vma->vm_mm;
86440+ spinlock_t *ptl;
86441+ pte_t *pte, entry;
86442+
86443+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
86444+ entry = *pte;
86445+ if (!pte_present(entry)) {
86446+ if (!pte_none(entry)) {
86447+ BUG_ON(pte_file(entry));
86448+ free_swap_and_cache(pte_to_swp_entry(entry));
86449+ pte_clear_not_present_full(mm, address, pte, 0);
86450+ }
86451+ } else {
86452+ struct page *page;
86453+
86454+ flush_cache_page(vma, address, pte_pfn(entry));
86455+ entry = ptep_clear_flush(vma, address, pte);
86456+ BUG_ON(pte_dirty(entry));
86457+ page = vm_normal_page(vma, address, entry);
86458+ if (page) {
86459+ update_hiwater_rss(mm);
86460+ if (PageAnon(page))
86461+ dec_mm_counter_fast(mm, MM_ANONPAGES);
86462+ else
86463+ dec_mm_counter_fast(mm, MM_FILEPAGES);
86464+ page_remove_rmap(page);
86465+ page_cache_release(page);
86466+ }
86467+ }
86468+ pte_unmap_unlock(pte, ptl);
86469+}
86470+
86471+/* PaX: if vma is mirrored, synchronize the mirror's PTE
86472+ *
86473+ * the ptl of the lower mapped page is held on entry and is not released on exit
86474+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
86475+ */
86476+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
86477+{
86478+ struct mm_struct *mm = vma->vm_mm;
86479+ unsigned long address_m;
86480+ spinlock_t *ptl_m;
86481+ struct vm_area_struct *vma_m;
86482+ pmd_t *pmd_m;
86483+ pte_t *pte_m, entry_m;
86484+
86485+ BUG_ON(!page_m || !PageAnon(page_m));
86486+
86487+ vma_m = pax_find_mirror_vma(vma);
86488+ if (!vma_m)
86489+ return;
86490+
86491+ BUG_ON(!PageLocked(page_m));
86492+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86493+ address_m = address + SEGMEXEC_TASK_SIZE;
86494+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86495+ pte_m = pte_offset_map(pmd_m, address_m);
86496+ ptl_m = pte_lockptr(mm, pmd_m);
86497+ if (ptl != ptl_m) {
86498+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86499+ if (!pte_none(*pte_m))
86500+ goto out;
86501+ }
86502+
86503+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
86504+ page_cache_get(page_m);
86505+ page_add_anon_rmap(page_m, vma_m, address_m);
86506+ inc_mm_counter_fast(mm, MM_ANONPAGES);
86507+ set_pte_at(mm, address_m, pte_m, entry_m);
86508+ update_mmu_cache(vma_m, address_m, pte_m);
86509+out:
86510+ if (ptl != ptl_m)
86511+ spin_unlock(ptl_m);
86512+ pte_unmap(pte_m);
86513+ unlock_page(page_m);
86514+}
86515+
86516+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
86517+{
86518+ struct mm_struct *mm = vma->vm_mm;
86519+ unsigned long address_m;
86520+ spinlock_t *ptl_m;
86521+ struct vm_area_struct *vma_m;
86522+ pmd_t *pmd_m;
86523+ pte_t *pte_m, entry_m;
86524+
86525+ BUG_ON(!page_m || PageAnon(page_m));
86526+
86527+ vma_m = pax_find_mirror_vma(vma);
86528+ if (!vma_m)
86529+ return;
86530+
86531+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86532+ address_m = address + SEGMEXEC_TASK_SIZE;
86533+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86534+ pte_m = pte_offset_map(pmd_m, address_m);
86535+ ptl_m = pte_lockptr(mm, pmd_m);
86536+ if (ptl != ptl_m) {
86537+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86538+ if (!pte_none(*pte_m))
86539+ goto out;
86540+ }
86541+
86542+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
86543+ page_cache_get(page_m);
86544+ page_add_file_rmap(page_m);
86545+ inc_mm_counter_fast(mm, MM_FILEPAGES);
86546+ set_pte_at(mm, address_m, pte_m, entry_m);
86547+ update_mmu_cache(vma_m, address_m, pte_m);
86548+out:
86549+ if (ptl != ptl_m)
86550+ spin_unlock(ptl_m);
86551+ pte_unmap(pte_m);
86552+}
86553+
86554+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
86555+{
86556+ struct mm_struct *mm = vma->vm_mm;
86557+ unsigned long address_m;
86558+ spinlock_t *ptl_m;
86559+ struct vm_area_struct *vma_m;
86560+ pmd_t *pmd_m;
86561+ pte_t *pte_m, entry_m;
86562+
86563+ vma_m = pax_find_mirror_vma(vma);
86564+ if (!vma_m)
86565+ return;
86566+
86567+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86568+ address_m = address + SEGMEXEC_TASK_SIZE;
86569+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86570+ pte_m = pte_offset_map(pmd_m, address_m);
86571+ ptl_m = pte_lockptr(mm, pmd_m);
86572+ if (ptl != ptl_m) {
86573+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86574+ if (!pte_none(*pte_m))
86575+ goto out;
86576+ }
86577+
86578+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
86579+ set_pte_at(mm, address_m, pte_m, entry_m);
86580+out:
86581+ if (ptl != ptl_m)
86582+ spin_unlock(ptl_m);
86583+ pte_unmap(pte_m);
86584+}
86585+
86586+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
86587+{
86588+ struct page *page_m;
86589+ pte_t entry;
86590+
86591+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
86592+ goto out;
86593+
86594+ entry = *pte;
86595+ page_m = vm_normal_page(vma, address, entry);
86596+ if (!page_m)
86597+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
86598+ else if (PageAnon(page_m)) {
86599+ if (pax_find_mirror_vma(vma)) {
86600+ pte_unmap_unlock(pte, ptl);
86601+ lock_page(page_m);
86602+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
86603+ if (pte_same(entry, *pte))
86604+ pax_mirror_anon_pte(vma, address, page_m, ptl);
86605+ else
86606+ unlock_page(page_m);
86607+ }
86608+ } else
86609+ pax_mirror_file_pte(vma, address, page_m, ptl);
86610+
86611+out:
86612+ pte_unmap_unlock(pte, ptl);
86613+}
86614+#endif
86615+
86616 /*
86617 * This routine handles present pages, when users try to write
86618 * to a shared page. It is done by copying the page to a new address
86619@@ -2808,6 +3004,12 @@ gotten:
86620 */
86621 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
86622 if (likely(pte_same(*page_table, orig_pte))) {
86623+
86624+#ifdef CONFIG_PAX_SEGMEXEC
86625+ if (pax_find_mirror_vma(vma))
86626+ BUG_ON(!trylock_page(new_page));
86627+#endif
86628+
86629 if (old_page) {
86630 if (!PageAnon(old_page)) {
86631 dec_mm_counter_fast(mm, MM_FILEPAGES);
86632@@ -2859,6 +3061,10 @@ gotten:
86633 page_remove_rmap(old_page);
86634 }
86635
86636+#ifdef CONFIG_PAX_SEGMEXEC
86637+ pax_mirror_anon_pte(vma, address, new_page, ptl);
86638+#endif
86639+
86640 /* Free the old page.. */
86641 new_page = old_page;
86642 ret |= VM_FAULT_WRITE;
86643@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
86644 swap_free(entry);
86645 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
86646 try_to_free_swap(page);
86647+
86648+#ifdef CONFIG_PAX_SEGMEXEC
86649+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
86650+#endif
86651+
86652 unlock_page(page);
86653 if (page != swapcache) {
86654 /*
86655@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
86656
86657 /* No need to invalidate - it was non-present before */
86658 update_mmu_cache(vma, address, page_table);
86659+
86660+#ifdef CONFIG_PAX_SEGMEXEC
86661+ pax_mirror_anon_pte(vma, address, page, ptl);
86662+#endif
86663+
86664 unlock:
86665 pte_unmap_unlock(page_table, ptl);
86666 out:
86667@@ -3176,40 +3392,6 @@ out_release:
86668 }
86669
86670 /*
86671- * This is like a special single-page "expand_{down|up}wards()",
86672- * except we must first make sure that 'address{-|+}PAGE_SIZE'
86673- * doesn't hit another vma.
86674- */
86675-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
86676-{
86677- address &= PAGE_MASK;
86678- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
86679- struct vm_area_struct *prev = vma->vm_prev;
86680-
86681- /*
86682- * Is there a mapping abutting this one below?
86683- *
86684- * That's only ok if it's the same stack mapping
86685- * that has gotten split..
86686- */
86687- if (prev && prev->vm_end == address)
86688- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
86689-
86690- expand_downwards(vma, address - PAGE_SIZE);
86691- }
86692- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
86693- struct vm_area_struct *next = vma->vm_next;
86694-
86695- /* As VM_GROWSDOWN but s/below/above/ */
86696- if (next && next->vm_start == address + PAGE_SIZE)
86697- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
86698-
86699- expand_upwards(vma, address + PAGE_SIZE);
86700- }
86701- return 0;
86702-}
86703-
86704-/*
86705 * We enter with non-exclusive mmap_sem (to exclude vma changes,
86706 * but allow concurrent faults), and pte mapped but not yet locked.
86707 * We return with mmap_sem still held, but pte unmapped and unlocked.
86708@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
86709 unsigned long address, pte_t *page_table, pmd_t *pmd,
86710 unsigned int flags)
86711 {
86712- struct page *page;
86713+ struct page *page = NULL;
86714 spinlock_t *ptl;
86715 pte_t entry;
86716
86717- pte_unmap(page_table);
86718-
86719- /* Check if we need to add a guard page to the stack */
86720- if (check_stack_guard_page(vma, address) < 0)
86721- return VM_FAULT_SIGBUS;
86722-
86723- /* Use the zero-page for reads */
86724 if (!(flags & FAULT_FLAG_WRITE)) {
86725 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
86726 vma->vm_page_prot));
86727- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
86728+ ptl = pte_lockptr(mm, pmd);
86729+ spin_lock(ptl);
86730 if (!pte_none(*page_table))
86731 goto unlock;
86732 goto setpte;
86733 }
86734
86735 /* Allocate our own private page. */
86736+ pte_unmap(page_table);
86737+
86738 if (unlikely(anon_vma_prepare(vma)))
86739 goto oom;
86740 page = alloc_zeroed_user_highpage_movable(vma, address);
86741@@ -3262,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
86742 if (!pte_none(*page_table))
86743 goto release;
86744
86745+#ifdef CONFIG_PAX_SEGMEXEC
86746+ if (pax_find_mirror_vma(vma))
86747+ BUG_ON(!trylock_page(page));
86748+#endif
86749+
86750 inc_mm_counter_fast(mm, MM_ANONPAGES);
86751 page_add_new_anon_rmap(page, vma, address);
86752 setpte:
86753@@ -3269,6 +3452,12 @@ setpte:
86754
86755 /* No need to invalidate - it was non-present before */
86756 update_mmu_cache(vma, address, page_table);
86757+
86758+#ifdef CONFIG_PAX_SEGMEXEC
86759+ if (page)
86760+ pax_mirror_anon_pte(vma, address, page, ptl);
86761+#endif
86762+
86763 unlock:
86764 pte_unmap_unlock(page_table, ptl);
86765 return 0;
86766@@ -3412,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86767 */
86768 /* Only go through if we didn't race with anybody else... */
86769 if (likely(pte_same(*page_table, orig_pte))) {
86770+
86771+#ifdef CONFIG_PAX_SEGMEXEC
86772+ if (anon && pax_find_mirror_vma(vma))
86773+ BUG_ON(!trylock_page(page));
86774+#endif
86775+
86776 flush_icache_page(vma, page);
86777 entry = mk_pte(page, vma->vm_page_prot);
86778 if (flags & FAULT_FLAG_WRITE)
86779@@ -3431,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86780
86781 /* no need to invalidate: a not-present page won't be cached */
86782 update_mmu_cache(vma, address, page_table);
86783+
86784+#ifdef CONFIG_PAX_SEGMEXEC
86785+ if (anon)
86786+ pax_mirror_anon_pte(vma, address, page, ptl);
86787+ else
86788+ pax_mirror_file_pte(vma, address, page, ptl);
86789+#endif
86790+
86791 } else {
86792 if (cow_page)
86793 mem_cgroup_uncharge_page(cow_page);
86794@@ -3752,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
86795 if (flags & FAULT_FLAG_WRITE)
86796 flush_tlb_fix_spurious_fault(vma, address);
86797 }
86798+
86799+#ifdef CONFIG_PAX_SEGMEXEC
86800+ pax_mirror_pte(vma, address, pte, pmd, ptl);
86801+ return 0;
86802+#endif
86803+
86804 unlock:
86805 pte_unmap_unlock(pte, ptl);
86806 return 0;
86807@@ -3768,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86808 pmd_t *pmd;
86809 pte_t *pte;
86810
86811+#ifdef CONFIG_PAX_SEGMEXEC
86812+ struct vm_area_struct *vma_m;
86813+#endif
86814+
86815 __set_current_state(TASK_RUNNING);
86816
86817 count_vm_event(PGFAULT);
86818@@ -3779,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86819 if (unlikely(is_vm_hugetlb_page(vma)))
86820 return hugetlb_fault(mm, vma, address, flags);
86821
86822+#ifdef CONFIG_PAX_SEGMEXEC
86823+ vma_m = pax_find_mirror_vma(vma);
86824+ if (vma_m) {
86825+ unsigned long address_m;
86826+ pgd_t *pgd_m;
86827+ pud_t *pud_m;
86828+ pmd_t *pmd_m;
86829+
86830+ if (vma->vm_start > vma_m->vm_start) {
86831+ address_m = address;
86832+ address -= SEGMEXEC_TASK_SIZE;
86833+ vma = vma_m;
86834+ } else
86835+ address_m = address + SEGMEXEC_TASK_SIZE;
86836+
86837+ pgd_m = pgd_offset(mm, address_m);
86838+ pud_m = pud_alloc(mm, pgd_m, address_m);
86839+ if (!pud_m)
86840+ return VM_FAULT_OOM;
86841+ pmd_m = pmd_alloc(mm, pud_m, address_m);
86842+ if (!pmd_m)
86843+ return VM_FAULT_OOM;
86844+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
86845+ return VM_FAULT_OOM;
86846+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
86847+ }
86848+#endif
86849+
86850 retry:
86851 pgd = pgd_offset(mm, address);
86852 pud = pud_alloc(mm, pgd, address);
86853@@ -3877,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
86854 spin_unlock(&mm->page_table_lock);
86855 return 0;
86856 }
86857+
86858+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
86859+{
86860+ pud_t *new = pud_alloc_one(mm, address);
86861+ if (!new)
86862+ return -ENOMEM;
86863+
86864+ smp_wmb(); /* See comment in __pte_alloc */
86865+
86866+ spin_lock(&mm->page_table_lock);
86867+ if (pgd_present(*pgd)) /* Another has populated it */
86868+ pud_free(mm, new);
86869+ else
86870+ pgd_populate_kernel(mm, pgd, new);
86871+ spin_unlock(&mm->page_table_lock);
86872+ return 0;
86873+}
86874 #endif /* __PAGETABLE_PUD_FOLDED */
86875
86876 #ifndef __PAGETABLE_PMD_FOLDED
86877@@ -3907,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
86878 spin_unlock(&mm->page_table_lock);
86879 return 0;
86880 }
86881+
86882+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
86883+{
86884+ pmd_t *new = pmd_alloc_one(mm, address);
86885+ if (!new)
86886+ return -ENOMEM;
86887+
86888+ smp_wmb(); /* See comment in __pte_alloc */
86889+
86890+ spin_lock(&mm->page_table_lock);
86891+#ifndef __ARCH_HAS_4LEVEL_HACK
86892+ if (pud_present(*pud)) /* Another has populated it */
86893+ pmd_free(mm, new);
86894+ else
86895+ pud_populate_kernel(mm, pud, new);
86896+#else
86897+ if (pgd_present(*pud)) /* Another has populated it */
86898+ pmd_free(mm, new);
86899+ else
86900+ pgd_populate_kernel(mm, pud, new);
86901+#endif /* __ARCH_HAS_4LEVEL_HACK */
86902+ spin_unlock(&mm->page_table_lock);
86903+ return 0;
86904+}
86905 #endif /* __PAGETABLE_PMD_FOLDED */
86906
86907 #if !defined(__HAVE_ARCH_GATE_AREA)
86908@@ -3920,7 +4202,7 @@ static int __init gate_vma_init(void)
86909 gate_vma.vm_start = FIXADDR_USER_START;
86910 gate_vma.vm_end = FIXADDR_USER_END;
86911 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
86912- gate_vma.vm_page_prot = __P101;
86913+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
86914
86915 return 0;
86916 }
86917@@ -4054,8 +4336,8 @@ out:
86918 return ret;
86919 }
86920
86921-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
86922- void *buf, int len, int write)
86923+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
86924+ void *buf, size_t len, int write)
86925 {
86926 resource_size_t phys_addr;
86927 unsigned long prot = 0;
86928@@ -4080,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
86929 * Access another process' address space as given in mm. If non-NULL, use the
86930 * given task for page fault accounting.
86931 */
86932-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
86933- unsigned long addr, void *buf, int len, int write)
86934+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
86935+ unsigned long addr, void *buf, size_t len, int write)
86936 {
86937 struct vm_area_struct *vma;
86938 void *old_buf = buf;
86939@@ -4089,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
86940 down_read(&mm->mmap_sem);
86941 /* ignore errors, just check how much was successfully transferred */
86942 while (len) {
86943- int bytes, ret, offset;
86944+ ssize_t bytes, ret, offset;
86945 void *maddr;
86946 struct page *page = NULL;
86947
86948@@ -4148,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
86949 *
86950 * The caller must hold a reference on @mm.
86951 */
86952-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
86953- void *buf, int len, int write)
86954+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
86955+ void *buf, size_t len, int write)
86956 {
86957 return __access_remote_vm(NULL, mm, addr, buf, len, write);
86958 }
86959@@ -4159,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
86960 * Source/target buffer must be kernel space,
86961 * Do not walk the page table directly, use get_user_pages
86962 */
86963-int access_process_vm(struct task_struct *tsk, unsigned long addr,
86964- void *buf, int len, int write)
86965+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
86966+ void *buf, size_t len, int write)
86967 {
86968 struct mm_struct *mm;
86969- int ret;
86970+ ssize_t ret;
86971
86972 mm = get_task_mm(tsk);
86973 if (!mm)
86974diff --git a/mm/mempolicy.c b/mm/mempolicy.c
86975index 4baf12e..5497066 100644
86976--- a/mm/mempolicy.c
86977+++ b/mm/mempolicy.c
86978@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
86979 unsigned long vmstart;
86980 unsigned long vmend;
86981
86982+#ifdef CONFIG_PAX_SEGMEXEC
86983+ struct vm_area_struct *vma_m;
86984+#endif
86985+
86986 vma = find_vma(mm, start);
86987 if (!vma || vma->vm_start > start)
86988 return -EFAULT;
86989@@ -751,6 +755,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
86990 err = vma_replace_policy(vma, new_pol);
86991 if (err)
86992 goto out;
86993+
86994+#ifdef CONFIG_PAX_SEGMEXEC
86995+ vma_m = pax_find_mirror_vma(vma);
86996+ if (vma_m) {
86997+ err = vma_replace_policy(vma_m, new_pol);
86998+ if (err)
86999+ goto out;
87000+ }
87001+#endif
87002+
87003 }
87004
87005 out:
87006@@ -1206,6 +1220,17 @@ static long do_mbind(unsigned long start, unsigned long len,
87007
87008 if (end < start)
87009 return -EINVAL;
87010+
87011+#ifdef CONFIG_PAX_SEGMEXEC
87012+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
87013+ if (end > SEGMEXEC_TASK_SIZE)
87014+ return -EINVAL;
87015+ } else
87016+#endif
87017+
87018+ if (end > TASK_SIZE)
87019+ return -EINVAL;
87020+
87021 if (end == start)
87022 return 0;
87023
87024@@ -1434,8 +1459,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
87025 */
87026 tcred = __task_cred(task);
87027 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
87028- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
87029- !capable(CAP_SYS_NICE)) {
87030+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
87031 rcu_read_unlock();
87032 err = -EPERM;
87033 goto out_put;
87034@@ -1466,6 +1490,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
87035 goto out;
87036 }
87037
87038+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87039+ if (mm != current->mm &&
87040+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
87041+ mmput(mm);
87042+ err = -EPERM;
87043+ goto out;
87044+ }
87045+#endif
87046+
87047 err = do_migrate_pages(mm, old, new,
87048 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
87049
87050diff --git a/mm/migrate.c b/mm/migrate.c
87051index 6f0c244..6d1ae32 100644
87052--- a/mm/migrate.c
87053+++ b/mm/migrate.c
87054@@ -1399,8 +1399,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
87055 */
87056 tcred = __task_cred(task);
87057 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
87058- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
87059- !capable(CAP_SYS_NICE)) {
87060+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
87061 rcu_read_unlock();
87062 err = -EPERM;
87063 goto out;
87064diff --git a/mm/mlock.c b/mm/mlock.c
87065index 79b7cf7..9944291 100644
87066--- a/mm/mlock.c
87067+++ b/mm/mlock.c
87068@@ -13,6 +13,7 @@
87069 #include <linux/pagemap.h>
87070 #include <linux/mempolicy.h>
87071 #include <linux/syscalls.h>
87072+#include <linux/security.h>
87073 #include <linux/sched.h>
87074 #include <linux/export.h>
87075 #include <linux/rmap.h>
87076@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
87077 {
87078 unsigned long nstart, end, tmp;
87079 struct vm_area_struct * vma, * prev;
87080- int error;
87081+ int error = 0;
87082
87083 VM_BUG_ON(start & ~PAGE_MASK);
87084 VM_BUG_ON(len != PAGE_ALIGN(len));
87085@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
87086 return -EINVAL;
87087 if (end == start)
87088 return 0;
87089+ if (end > TASK_SIZE)
87090+ return -EINVAL;
87091+
87092 vma = find_vma(current->mm, start);
87093 if (!vma || vma->vm_start > start)
87094 return -ENOMEM;
87095@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
87096 for (nstart = start ; ; ) {
87097 vm_flags_t newflags;
87098
87099+#ifdef CONFIG_PAX_SEGMEXEC
87100+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
87101+ break;
87102+#endif
87103+
87104 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
87105
87106 newflags = vma->vm_flags & ~VM_LOCKED;
87107@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
87108 lock_limit >>= PAGE_SHIFT;
87109
87110 /* check against resource limits */
87111+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
87112 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
87113 error = do_mlock(start, len, 1);
87114 up_write(&current->mm->mmap_sem);
87115@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
87116 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
87117 vm_flags_t newflags;
87118
87119+#ifdef CONFIG_PAX_SEGMEXEC
87120+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
87121+ break;
87122+#endif
87123+
87124 newflags = vma->vm_flags & ~VM_LOCKED;
87125 if (flags & MCL_CURRENT)
87126 newflags |= VM_LOCKED;
87127@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
87128 lock_limit >>= PAGE_SHIFT;
87129
87130 ret = -ENOMEM;
87131+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
87132 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
87133 capable(CAP_IPC_LOCK))
87134 ret = do_mlockall(flags);
87135diff --git a/mm/mmap.c b/mm/mmap.c
87136index 8d25fdc..bfb7626 100644
87137--- a/mm/mmap.c
87138+++ b/mm/mmap.c
87139@@ -36,6 +36,7 @@
87140 #include <linux/sched/sysctl.h>
87141 #include <linux/notifier.h>
87142 #include <linux/memory.h>
87143+#include <linux/random.h>
87144
87145 #include <asm/uaccess.h>
87146 #include <asm/cacheflush.h>
87147@@ -52,6 +53,16 @@
87148 #define arch_rebalance_pgtables(addr, len) (addr)
87149 #endif
87150
87151+static inline void verify_mm_writelocked(struct mm_struct *mm)
87152+{
87153+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
87154+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
87155+ up_read(&mm->mmap_sem);
87156+ BUG();
87157+ }
87158+#endif
87159+}
87160+
87161 static void unmap_region(struct mm_struct *mm,
87162 struct vm_area_struct *vma, struct vm_area_struct *prev,
87163 unsigned long start, unsigned long end);
87164@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
87165 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
87166 *
87167 */
87168-pgprot_t protection_map[16] = {
87169+pgprot_t protection_map[16] __read_only = {
87170 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
87171 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
87172 };
87173
87174-pgprot_t vm_get_page_prot(unsigned long vm_flags)
87175+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
87176 {
87177- return __pgprot(pgprot_val(protection_map[vm_flags &
87178+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
87179 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
87180 pgprot_val(arch_vm_get_page_prot(vm_flags)));
87181+
87182+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87183+ if (!(__supported_pte_mask & _PAGE_NX) &&
87184+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
87185+ (vm_flags & (VM_READ | VM_WRITE)))
87186+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
87187+#endif
87188+
87189+ return prot;
87190 }
87191 EXPORT_SYMBOL(vm_get_page_prot);
87192
87193@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
87194 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
87195 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
87196 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
87197+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
87198 /*
87199 * Make sure vm_committed_as in one cacheline and not cacheline shared with
87200 * other variables. It can be updated by several CPUs frequently.
87201@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
87202 struct vm_area_struct *next = vma->vm_next;
87203
87204 might_sleep();
87205+ BUG_ON(vma->vm_mirror);
87206 if (vma->vm_ops && vma->vm_ops->close)
87207 vma->vm_ops->close(vma);
87208 if (vma->vm_file)
87209@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
87210 * not page aligned -Ram Gupta
87211 */
87212 rlim = rlimit(RLIMIT_DATA);
87213+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
87214 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
87215 (mm->end_data - mm->start_data) > rlim)
87216 goto out;
87217@@ -933,6 +956,12 @@ static int
87218 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
87219 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
87220 {
87221+
87222+#ifdef CONFIG_PAX_SEGMEXEC
87223+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
87224+ return 0;
87225+#endif
87226+
87227 if (is_mergeable_vma(vma, file, vm_flags) &&
87228 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
87229 if (vma->vm_pgoff == vm_pgoff)
87230@@ -952,6 +981,12 @@ static int
87231 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
87232 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
87233 {
87234+
87235+#ifdef CONFIG_PAX_SEGMEXEC
87236+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
87237+ return 0;
87238+#endif
87239+
87240 if (is_mergeable_vma(vma, file, vm_flags) &&
87241 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
87242 pgoff_t vm_pglen;
87243@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
87244 struct vm_area_struct *vma_merge(struct mm_struct *mm,
87245 struct vm_area_struct *prev, unsigned long addr,
87246 unsigned long end, unsigned long vm_flags,
87247- struct anon_vma *anon_vma, struct file *file,
87248+ struct anon_vma *anon_vma, struct file *file,
87249 pgoff_t pgoff, struct mempolicy *policy)
87250 {
87251 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
87252 struct vm_area_struct *area, *next;
87253 int err;
87254
87255+#ifdef CONFIG_PAX_SEGMEXEC
87256+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
87257+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
87258+
87259+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
87260+#endif
87261+
87262 /*
87263 * We later require that vma->vm_flags == vm_flags,
87264 * so this tests vma->vm_flags & VM_SPECIAL, too.
87265@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87266 if (next && next->vm_end == end) /* cases 6, 7, 8 */
87267 next = next->vm_next;
87268
87269+#ifdef CONFIG_PAX_SEGMEXEC
87270+ if (prev)
87271+ prev_m = pax_find_mirror_vma(prev);
87272+ if (area)
87273+ area_m = pax_find_mirror_vma(area);
87274+ if (next)
87275+ next_m = pax_find_mirror_vma(next);
87276+#endif
87277+
87278 /*
87279 * Can it merge with the predecessor?
87280 */
87281@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87282 /* cases 1, 6 */
87283 err = vma_adjust(prev, prev->vm_start,
87284 next->vm_end, prev->vm_pgoff, NULL);
87285- } else /* cases 2, 5, 7 */
87286+
87287+#ifdef CONFIG_PAX_SEGMEXEC
87288+ if (!err && prev_m)
87289+ err = vma_adjust(prev_m, prev_m->vm_start,
87290+ next_m->vm_end, prev_m->vm_pgoff, NULL);
87291+#endif
87292+
87293+ } else { /* cases 2, 5, 7 */
87294 err = vma_adjust(prev, prev->vm_start,
87295 end, prev->vm_pgoff, NULL);
87296+
87297+#ifdef CONFIG_PAX_SEGMEXEC
87298+ if (!err && prev_m)
87299+ err = vma_adjust(prev_m, prev_m->vm_start,
87300+ end_m, prev_m->vm_pgoff, NULL);
87301+#endif
87302+
87303+ }
87304 if (err)
87305 return NULL;
87306 khugepaged_enter_vma_merge(prev);
87307@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87308 mpol_equal(policy, vma_policy(next)) &&
87309 can_vma_merge_before(next, vm_flags,
87310 anon_vma, file, pgoff+pglen)) {
87311- if (prev && addr < prev->vm_end) /* case 4 */
87312+ if (prev && addr < prev->vm_end) { /* case 4 */
87313 err = vma_adjust(prev, prev->vm_start,
87314 addr, prev->vm_pgoff, NULL);
87315- else /* cases 3, 8 */
87316+
87317+#ifdef CONFIG_PAX_SEGMEXEC
87318+ if (!err && prev_m)
87319+ err = vma_adjust(prev_m, prev_m->vm_start,
87320+ addr_m, prev_m->vm_pgoff, NULL);
87321+#endif
87322+
87323+ } else { /* cases 3, 8 */
87324 err = vma_adjust(area, addr, next->vm_end,
87325 next->vm_pgoff - pglen, NULL);
87326+
87327+#ifdef CONFIG_PAX_SEGMEXEC
87328+ if (!err && area_m)
87329+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
87330+ next_m->vm_pgoff - pglen, NULL);
87331+#endif
87332+
87333+ }
87334 if (err)
87335 return NULL;
87336 khugepaged_enter_vma_merge(area);
87337@@ -1165,8 +1246,10 @@ none:
87338 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
87339 struct file *file, long pages)
87340 {
87341- const unsigned long stack_flags
87342- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
87343+
87344+#ifdef CONFIG_PAX_RANDMMAP
87345+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
87346+#endif
87347
87348 mm->total_vm += pages;
87349
87350@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
87351 mm->shared_vm += pages;
87352 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
87353 mm->exec_vm += pages;
87354- } else if (flags & stack_flags)
87355+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
87356 mm->stack_vm += pages;
87357 }
87358 #endif /* CONFIG_PROC_FS */
87359@@ -1213,7 +1296,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87360 * (the exception is when the underlying filesystem is noexec
87361 * mounted, in which case we dont add PROT_EXEC.)
87362 */
87363- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
87364+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
87365 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
87366 prot |= PROT_EXEC;
87367
87368@@ -1239,7 +1322,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87369 /* Obtain the address to map to. we verify (or select) it and ensure
87370 * that it represents a valid section of the address space.
87371 */
87372- addr = get_unmapped_area(file, addr, len, pgoff, flags);
87373+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
87374 if (addr & ~PAGE_MASK)
87375 return addr;
87376
87377@@ -1250,6 +1333,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87378 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
87379 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
87380
87381+#ifdef CONFIG_PAX_MPROTECT
87382+ if (mm->pax_flags & MF_PAX_MPROTECT) {
87383+
87384+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
87385+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
87386+ mm->binfmt->handle_mmap)
87387+ mm->binfmt->handle_mmap(file);
87388+#endif
87389+
87390+#ifndef CONFIG_PAX_MPROTECT_COMPAT
87391+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
87392+ gr_log_rwxmmap(file);
87393+
87394+#ifdef CONFIG_PAX_EMUPLT
87395+ vm_flags &= ~VM_EXEC;
87396+#else
87397+ return -EPERM;
87398+#endif
87399+
87400+ }
87401+
87402+ if (!(vm_flags & VM_EXEC))
87403+ vm_flags &= ~VM_MAYEXEC;
87404+#else
87405+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
87406+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
87407+#endif
87408+ else
87409+ vm_flags &= ~VM_MAYWRITE;
87410+ }
87411+#endif
87412+
87413+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87414+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
87415+ vm_flags &= ~VM_PAGEEXEC;
87416+#endif
87417+
87418 if (flags & MAP_LOCKED)
87419 if (!can_do_mlock())
87420 return -EPERM;
87421@@ -1261,6 +1381,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87422 locked += mm->locked_vm;
87423 lock_limit = rlimit(RLIMIT_MEMLOCK);
87424 lock_limit >>= PAGE_SHIFT;
87425+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
87426 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
87427 return -EAGAIN;
87428 }
87429@@ -1341,6 +1462,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87430 vm_flags |= VM_NORESERVE;
87431 }
87432
87433+ if (!gr_acl_handle_mmap(file, prot))
87434+ return -EACCES;
87435+
87436 addr = mmap_region(file, addr, len, vm_flags, pgoff);
87437 if (!IS_ERR_VALUE(addr) &&
87438 ((vm_flags & VM_LOCKED) ||
87439@@ -1432,7 +1556,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
87440 vm_flags_t vm_flags = vma->vm_flags;
87441
87442 /* If it was private or non-writable, the write bit is already clear */
87443- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
87444+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
87445 return 0;
87446
87447 /* The backer wishes to know when pages are first written to? */
87448@@ -1480,7 +1604,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
87449 unsigned long charged = 0;
87450 struct inode *inode = file ? file_inode(file) : NULL;
87451
87452+#ifdef CONFIG_PAX_SEGMEXEC
87453+ struct vm_area_struct *vma_m = NULL;
87454+#endif
87455+
87456+ /*
87457+ * mm->mmap_sem is required to protect against another thread
87458+ * changing the mappings in case we sleep.
87459+ */
87460+ verify_mm_writelocked(mm);
87461+
87462 /* Check against address space limit. */
87463+
87464+#ifdef CONFIG_PAX_RANDMMAP
87465+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
87466+#endif
87467+
87468 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
87469 unsigned long nr_pages;
87470
87471@@ -1499,11 +1638,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
87472
87473 /* Clear old maps */
87474 error = -ENOMEM;
87475-munmap_back:
87476 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
87477 if (do_munmap(mm, addr, len))
87478 return -ENOMEM;
87479- goto munmap_back;
87480+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
87481 }
87482
87483 /*
87484@@ -1534,6 +1672,16 @@ munmap_back:
87485 goto unacct_error;
87486 }
87487
87488+#ifdef CONFIG_PAX_SEGMEXEC
87489+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
87490+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
87491+ if (!vma_m) {
87492+ error = -ENOMEM;
87493+ goto free_vma;
87494+ }
87495+ }
87496+#endif
87497+
87498 vma->vm_mm = mm;
87499 vma->vm_start = addr;
87500 vma->vm_end = addr + len;
87501@@ -1558,6 +1706,13 @@ munmap_back:
87502 if (error)
87503 goto unmap_and_free_vma;
87504
87505+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87506+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
87507+ vma->vm_flags |= VM_PAGEEXEC;
87508+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
87509+ }
87510+#endif
87511+
87512 /* Can addr have changed??
87513 *
87514 * Answer: Yes, several device drivers can do it in their
87515@@ -1596,6 +1751,11 @@ munmap_back:
87516 vma_link(mm, vma, prev, rb_link, rb_parent);
87517 file = vma->vm_file;
87518
87519+#ifdef CONFIG_PAX_SEGMEXEC
87520+ if (vma_m)
87521+ BUG_ON(pax_mirror_vma(vma_m, vma));
87522+#endif
87523+
87524 /* Once vma denies write, undo our temporary denial count */
87525 if (correct_wcount)
87526 atomic_inc(&inode->i_writecount);
87527@@ -1603,6 +1763,7 @@ out:
87528 perf_event_mmap(vma);
87529
87530 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
87531+ track_exec_limit(mm, addr, addr + len, vm_flags);
87532 if (vm_flags & VM_LOCKED) {
87533 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
87534 vma == get_gate_vma(current->mm)))
87535@@ -1626,6 +1787,12 @@ unmap_and_free_vma:
87536 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
87537 charged = 0;
87538 free_vma:
87539+
87540+#ifdef CONFIG_PAX_SEGMEXEC
87541+ if (vma_m)
87542+ kmem_cache_free(vm_area_cachep, vma_m);
87543+#endif
87544+
87545 kmem_cache_free(vm_area_cachep, vma);
87546 unacct_error:
87547 if (charged)
87548@@ -1633,7 +1800,63 @@ unacct_error:
87549 return error;
87550 }
87551
87552-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
87553+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
87554+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
87555+{
87556+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
87557+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
87558+
87559+ return 0;
87560+}
87561+#endif
87562+
87563+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
87564+{
87565+ if (!vma) {
87566+#ifdef CONFIG_STACK_GROWSUP
87567+ if (addr > sysctl_heap_stack_gap)
87568+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
87569+ else
87570+ vma = find_vma(current->mm, 0);
87571+ if (vma && (vma->vm_flags & VM_GROWSUP))
87572+ return false;
87573+#endif
87574+ return true;
87575+ }
87576+
87577+ if (addr + len > vma->vm_start)
87578+ return false;
87579+
87580+ if (vma->vm_flags & VM_GROWSDOWN)
87581+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
87582+#ifdef CONFIG_STACK_GROWSUP
87583+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
87584+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
87585+#endif
87586+ else if (offset)
87587+ return offset <= vma->vm_start - addr - len;
87588+
87589+ return true;
87590+}
87591+
87592+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
87593+{
87594+ if (vma->vm_start < len)
87595+ return -ENOMEM;
87596+
87597+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
87598+ if (offset <= vma->vm_start - len)
87599+ return vma->vm_start - len - offset;
87600+ else
87601+ return -ENOMEM;
87602+ }
87603+
87604+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
87605+ return vma->vm_start - len - sysctl_heap_stack_gap;
87606+ return -ENOMEM;
87607+}
87608+
87609+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
87610 {
87611 /*
87612 * We implement the search by looking for an rbtree node that
87613@@ -1681,11 +1904,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
87614 }
87615 }
87616
87617- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
87618+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
87619 check_current:
87620 /* Check if current node has a suitable gap */
87621 if (gap_start > high_limit)
87622 return -ENOMEM;
87623+
87624+ if (gap_end - gap_start > info->threadstack_offset)
87625+ gap_start += info->threadstack_offset;
87626+ else
87627+ gap_start = gap_end;
87628+
87629+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
87630+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87631+ gap_start += sysctl_heap_stack_gap;
87632+ else
87633+ gap_start = gap_end;
87634+ }
87635+ if (vma->vm_flags & VM_GROWSDOWN) {
87636+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87637+ gap_end -= sysctl_heap_stack_gap;
87638+ else
87639+ gap_end = gap_start;
87640+ }
87641 if (gap_end >= low_limit && gap_end - gap_start >= length)
87642 goto found;
87643
87644@@ -1735,7 +1976,7 @@ found:
87645 return gap_start;
87646 }
87647
87648-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
87649+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
87650 {
87651 struct mm_struct *mm = current->mm;
87652 struct vm_area_struct *vma;
87653@@ -1789,6 +2030,24 @@ check_current:
87654 gap_end = vma->vm_start;
87655 if (gap_end < low_limit)
87656 return -ENOMEM;
87657+
87658+ if (gap_end - gap_start > info->threadstack_offset)
87659+ gap_end -= info->threadstack_offset;
87660+ else
87661+ gap_end = gap_start;
87662+
87663+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
87664+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87665+ gap_start += sysctl_heap_stack_gap;
87666+ else
87667+ gap_start = gap_end;
87668+ }
87669+ if (vma->vm_flags & VM_GROWSDOWN) {
87670+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87671+ gap_end -= sysctl_heap_stack_gap;
87672+ else
87673+ gap_end = gap_start;
87674+ }
87675 if (gap_start <= high_limit && gap_end - gap_start >= length)
87676 goto found;
87677
87678@@ -1852,6 +2111,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
87679 struct mm_struct *mm = current->mm;
87680 struct vm_area_struct *vma;
87681 struct vm_unmapped_area_info info;
87682+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
87683
87684 if (len > TASK_SIZE)
87685 return -ENOMEM;
87686@@ -1859,29 +2119,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
87687 if (flags & MAP_FIXED)
87688 return addr;
87689
87690+#ifdef CONFIG_PAX_RANDMMAP
87691+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
87692+#endif
87693+
87694 if (addr) {
87695 addr = PAGE_ALIGN(addr);
87696 vma = find_vma(mm, addr);
87697- if (TASK_SIZE - len >= addr &&
87698- (!vma || addr + len <= vma->vm_start))
87699+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
87700 return addr;
87701 }
87702
87703 info.flags = 0;
87704 info.length = len;
87705 info.low_limit = TASK_UNMAPPED_BASE;
87706+
87707+#ifdef CONFIG_PAX_RANDMMAP
87708+ if (mm->pax_flags & MF_PAX_RANDMMAP)
87709+ info.low_limit += mm->delta_mmap;
87710+#endif
87711+
87712 info.high_limit = TASK_SIZE;
87713 info.align_mask = 0;
87714+ info.threadstack_offset = offset;
87715 return vm_unmapped_area(&info);
87716 }
87717 #endif
87718
87719 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
87720 {
87721+
87722+#ifdef CONFIG_PAX_SEGMEXEC
87723+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
87724+ return;
87725+#endif
87726+
87727 /*
87728 * Is this a new hole at the lowest possible address?
87729 */
87730- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
87731+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
87732 mm->free_area_cache = addr;
87733 }
87734
87735@@ -1899,6 +2175,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87736 struct mm_struct *mm = current->mm;
87737 unsigned long addr = addr0;
87738 struct vm_unmapped_area_info info;
87739+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
87740
87741 /* requested length too big for entire address space */
87742 if (len > TASK_SIZE)
87743@@ -1907,12 +2184,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87744 if (flags & MAP_FIXED)
87745 return addr;
87746
87747+#ifdef CONFIG_PAX_RANDMMAP
87748+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
87749+#endif
87750+
87751 /* requesting a specific address */
87752 if (addr) {
87753 addr = PAGE_ALIGN(addr);
87754 vma = find_vma(mm, addr);
87755- if (TASK_SIZE - len >= addr &&
87756- (!vma || addr + len <= vma->vm_start))
87757+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
87758 return addr;
87759 }
87760
87761@@ -1921,6 +2201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87762 info.low_limit = PAGE_SIZE;
87763 info.high_limit = mm->mmap_base;
87764 info.align_mask = 0;
87765+ info.threadstack_offset = offset;
87766 addr = vm_unmapped_area(&info);
87767
87768 /*
87769@@ -1933,6 +2214,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87770 VM_BUG_ON(addr != -ENOMEM);
87771 info.flags = 0;
87772 info.low_limit = TASK_UNMAPPED_BASE;
87773+
87774+#ifdef CONFIG_PAX_RANDMMAP
87775+ if (mm->pax_flags & MF_PAX_RANDMMAP)
87776+ info.low_limit += mm->delta_mmap;
87777+#endif
87778+
87779 info.high_limit = TASK_SIZE;
87780 addr = vm_unmapped_area(&info);
87781 }
87782@@ -1943,6 +2230,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87783
87784 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
87785 {
87786+
87787+#ifdef CONFIG_PAX_SEGMEXEC
87788+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
87789+ return;
87790+#endif
87791+
87792 /*
87793 * Is this a new hole at the highest possible address?
87794 */
87795@@ -1950,8 +2243,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
87796 mm->free_area_cache = addr;
87797
87798 /* dont allow allocations above current base */
87799- if (mm->free_area_cache > mm->mmap_base)
87800+ if (mm->free_area_cache > mm->mmap_base) {
87801 mm->free_area_cache = mm->mmap_base;
87802+ mm->cached_hole_size = ~0UL;
87803+ }
87804 }
87805
87806 unsigned long
87807@@ -2047,6 +2342,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
87808 return vma;
87809 }
87810
87811+#ifdef CONFIG_PAX_SEGMEXEC
87812+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
87813+{
87814+ struct vm_area_struct *vma_m;
87815+
87816+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
87817+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
87818+ BUG_ON(vma->vm_mirror);
87819+ return NULL;
87820+ }
87821+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
87822+ vma_m = vma->vm_mirror;
87823+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
87824+ BUG_ON(vma->vm_file != vma_m->vm_file);
87825+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
87826+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
87827+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
87828+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
87829+ return vma_m;
87830+}
87831+#endif
87832+
87833 /*
87834 * Verify that the stack growth is acceptable and
87835 * update accounting. This is shared with both the
87836@@ -2063,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
87837 return -ENOMEM;
87838
87839 /* Stack limit test */
87840+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
87841 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
87842 return -ENOMEM;
87843
87844@@ -2073,6 +2391,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
87845 locked = mm->locked_vm + grow;
87846 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
87847 limit >>= PAGE_SHIFT;
87848+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
87849 if (locked > limit && !capable(CAP_IPC_LOCK))
87850 return -ENOMEM;
87851 }
87852@@ -2102,37 +2421,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
87853 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
87854 * vma is the last one with address > vma->vm_end. Have to extend vma.
87855 */
87856+#ifndef CONFIG_IA64
87857+static
87858+#endif
87859 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
87860 {
87861 int error;
87862+ bool locknext;
87863
87864 if (!(vma->vm_flags & VM_GROWSUP))
87865 return -EFAULT;
87866
87867+ /* Also guard against wrapping around to address 0. */
87868+ if (address < PAGE_ALIGN(address+1))
87869+ address = PAGE_ALIGN(address+1);
87870+ else
87871+ return -ENOMEM;
87872+
87873 /*
87874 * We must make sure the anon_vma is allocated
87875 * so that the anon_vma locking is not a noop.
87876 */
87877 if (unlikely(anon_vma_prepare(vma)))
87878 return -ENOMEM;
87879+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
87880+ if (locknext && anon_vma_prepare(vma->vm_next))
87881+ return -ENOMEM;
87882 vma_lock_anon_vma(vma);
87883+ if (locknext)
87884+ vma_lock_anon_vma(vma->vm_next);
87885
87886 /*
87887 * vma->vm_start/vm_end cannot change under us because the caller
87888 * is required to hold the mmap_sem in read mode. We need the
87889- * anon_vma lock to serialize against concurrent expand_stacks.
87890- * Also guard against wrapping around to address 0.
87891+ * anon_vma locks to serialize against concurrent expand_stacks
87892+ * and expand_upwards.
87893 */
87894- if (address < PAGE_ALIGN(address+4))
87895- address = PAGE_ALIGN(address+4);
87896- else {
87897- vma_unlock_anon_vma(vma);
87898- return -ENOMEM;
87899- }
87900 error = 0;
87901
87902 /* Somebody else might have raced and expanded it already */
87903- if (address > vma->vm_end) {
87904+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
87905+ error = -ENOMEM;
87906+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
87907 unsigned long size, grow;
87908
87909 size = address - vma->vm_start;
87910@@ -2167,6 +2497,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
87911 }
87912 }
87913 }
87914+ if (locknext)
87915+ vma_unlock_anon_vma(vma->vm_next);
87916 vma_unlock_anon_vma(vma);
87917 khugepaged_enter_vma_merge(vma);
87918 validate_mm(vma->vm_mm);
87919@@ -2181,6 +2513,8 @@ int expand_downwards(struct vm_area_struct *vma,
87920 unsigned long address)
87921 {
87922 int error;
87923+ bool lockprev = false;
87924+ struct vm_area_struct *prev;
87925
87926 /*
87927 * We must make sure the anon_vma is allocated
87928@@ -2194,6 +2528,15 @@ int expand_downwards(struct vm_area_struct *vma,
87929 if (error)
87930 return error;
87931
87932+ prev = vma->vm_prev;
87933+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
87934+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
87935+#endif
87936+ if (lockprev && anon_vma_prepare(prev))
87937+ return -ENOMEM;
87938+ if (lockprev)
87939+ vma_lock_anon_vma(prev);
87940+
87941 vma_lock_anon_vma(vma);
87942
87943 /*
87944@@ -2203,9 +2546,17 @@ int expand_downwards(struct vm_area_struct *vma,
87945 */
87946
87947 /* Somebody else might have raced and expanded it already */
87948- if (address < vma->vm_start) {
87949+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
87950+ error = -ENOMEM;
87951+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
87952 unsigned long size, grow;
87953
87954+#ifdef CONFIG_PAX_SEGMEXEC
87955+ struct vm_area_struct *vma_m;
87956+
87957+ vma_m = pax_find_mirror_vma(vma);
87958+#endif
87959+
87960 size = vma->vm_end - address;
87961 grow = (vma->vm_start - address) >> PAGE_SHIFT;
87962
87963@@ -2230,13 +2581,27 @@ int expand_downwards(struct vm_area_struct *vma,
87964 vma->vm_pgoff -= grow;
87965 anon_vma_interval_tree_post_update_vma(vma);
87966 vma_gap_update(vma);
87967+
87968+#ifdef CONFIG_PAX_SEGMEXEC
87969+ if (vma_m) {
87970+ anon_vma_interval_tree_pre_update_vma(vma_m);
87971+ vma_m->vm_start -= grow << PAGE_SHIFT;
87972+ vma_m->vm_pgoff -= grow;
87973+ anon_vma_interval_tree_post_update_vma(vma_m);
87974+ vma_gap_update(vma_m);
87975+ }
87976+#endif
87977+
87978 spin_unlock(&vma->vm_mm->page_table_lock);
87979
87980+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
87981 perf_event_mmap(vma);
87982 }
87983 }
87984 }
87985 vma_unlock_anon_vma(vma);
87986+ if (lockprev)
87987+ vma_unlock_anon_vma(prev);
87988 khugepaged_enter_vma_merge(vma);
87989 validate_mm(vma->vm_mm);
87990 return error;
87991@@ -2334,6 +2699,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
87992 do {
87993 long nrpages = vma_pages(vma);
87994
87995+#ifdef CONFIG_PAX_SEGMEXEC
87996+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
87997+ vma = remove_vma(vma);
87998+ continue;
87999+ }
88000+#endif
88001+
88002 if (vma->vm_flags & VM_ACCOUNT)
88003 nr_accounted += nrpages;
88004 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
88005@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
88006 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
88007 vma->vm_prev = NULL;
88008 do {
88009+
88010+#ifdef CONFIG_PAX_SEGMEXEC
88011+ if (vma->vm_mirror) {
88012+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
88013+ vma->vm_mirror->vm_mirror = NULL;
88014+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
88015+ vma->vm_mirror = NULL;
88016+ }
88017+#endif
88018+
88019 vma_rb_erase(vma, &mm->mm_rb);
88020 mm->map_count--;
88021 tail_vma = vma;
88022@@ -2410,14 +2792,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88023 struct vm_area_struct *new;
88024 int err = -ENOMEM;
88025
88026+#ifdef CONFIG_PAX_SEGMEXEC
88027+ struct vm_area_struct *vma_m, *new_m = NULL;
88028+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
88029+#endif
88030+
88031 if (is_vm_hugetlb_page(vma) && (addr &
88032 ~(huge_page_mask(hstate_vma(vma)))))
88033 return -EINVAL;
88034
88035+#ifdef CONFIG_PAX_SEGMEXEC
88036+ vma_m = pax_find_mirror_vma(vma);
88037+#endif
88038+
88039 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
88040 if (!new)
88041 goto out_err;
88042
88043+#ifdef CONFIG_PAX_SEGMEXEC
88044+ if (vma_m) {
88045+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
88046+ if (!new_m) {
88047+ kmem_cache_free(vm_area_cachep, new);
88048+ goto out_err;
88049+ }
88050+ }
88051+#endif
88052+
88053 /* most fields are the same, copy all, and then fixup */
88054 *new = *vma;
88055
88056@@ -2430,6 +2831,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88057 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
88058 }
88059
88060+#ifdef CONFIG_PAX_SEGMEXEC
88061+ if (vma_m) {
88062+ *new_m = *vma_m;
88063+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
88064+ new_m->vm_mirror = new;
88065+ new->vm_mirror = new_m;
88066+
88067+ if (new_below)
88068+ new_m->vm_end = addr_m;
88069+ else {
88070+ new_m->vm_start = addr_m;
88071+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
88072+ }
88073+ }
88074+#endif
88075+
88076 pol = mpol_dup(vma_policy(vma));
88077 if (IS_ERR(pol)) {
88078 err = PTR_ERR(pol);
88079@@ -2452,6 +2869,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88080 else
88081 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
88082
88083+#ifdef CONFIG_PAX_SEGMEXEC
88084+ if (!err && vma_m) {
88085+ if (anon_vma_clone(new_m, vma_m))
88086+ goto out_free_mpol;
88087+
88088+ mpol_get(pol);
88089+ vma_set_policy(new_m, pol);
88090+
88091+ if (new_m->vm_file)
88092+ get_file(new_m->vm_file);
88093+
88094+ if (new_m->vm_ops && new_m->vm_ops->open)
88095+ new_m->vm_ops->open(new_m);
88096+
88097+ if (new_below)
88098+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
88099+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
88100+ else
88101+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
88102+
88103+ if (err) {
88104+ if (new_m->vm_ops && new_m->vm_ops->close)
88105+ new_m->vm_ops->close(new_m);
88106+ if (new_m->vm_file)
88107+ fput(new_m->vm_file);
88108+ mpol_put(pol);
88109+ }
88110+ }
88111+#endif
88112+
88113 /* Success. */
88114 if (!err)
88115 return 0;
88116@@ -2461,10 +2908,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88117 new->vm_ops->close(new);
88118 if (new->vm_file)
88119 fput(new->vm_file);
88120- unlink_anon_vmas(new);
88121 out_free_mpol:
88122 mpol_put(pol);
88123 out_free_vma:
88124+
88125+#ifdef CONFIG_PAX_SEGMEXEC
88126+ if (new_m) {
88127+ unlink_anon_vmas(new_m);
88128+ kmem_cache_free(vm_area_cachep, new_m);
88129+ }
88130+#endif
88131+
88132+ unlink_anon_vmas(new);
88133 kmem_cache_free(vm_area_cachep, new);
88134 out_err:
88135 return err;
88136@@ -2477,6 +2932,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88137 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88138 unsigned long addr, int new_below)
88139 {
88140+
88141+#ifdef CONFIG_PAX_SEGMEXEC
88142+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
88143+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
88144+ if (mm->map_count >= sysctl_max_map_count-1)
88145+ return -ENOMEM;
88146+ } else
88147+#endif
88148+
88149 if (mm->map_count >= sysctl_max_map_count)
88150 return -ENOMEM;
88151
88152@@ -2488,11 +2952,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88153 * work. This now handles partial unmappings.
88154 * Jeremy Fitzhardinge <jeremy@goop.org>
88155 */
88156+#ifdef CONFIG_PAX_SEGMEXEC
88157 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88158 {
88159+ int ret = __do_munmap(mm, start, len);
88160+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
88161+ return ret;
88162+
88163+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
88164+}
88165+
88166+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88167+#else
88168+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88169+#endif
88170+{
88171 unsigned long end;
88172 struct vm_area_struct *vma, *prev, *last;
88173
88174+ /*
88175+ * mm->mmap_sem is required to protect against another thread
88176+ * changing the mappings in case we sleep.
88177+ */
88178+ verify_mm_writelocked(mm);
88179+
88180 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
88181 return -EINVAL;
88182
88183@@ -2567,6 +3050,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88184 /* Fix up all other VM information */
88185 remove_vma_list(mm, vma);
88186
88187+ track_exec_limit(mm, start, end, 0UL);
88188+
88189 return 0;
88190 }
88191
88192@@ -2575,6 +3060,13 @@ int vm_munmap(unsigned long start, size_t len)
88193 int ret;
88194 struct mm_struct *mm = current->mm;
88195
88196+
88197+#ifdef CONFIG_PAX_SEGMEXEC
88198+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
88199+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
88200+ return -EINVAL;
88201+#endif
88202+
88203 down_write(&mm->mmap_sem);
88204 ret = do_munmap(mm, start, len);
88205 up_write(&mm->mmap_sem);
88206@@ -2588,16 +3080,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
88207 return vm_munmap(addr, len);
88208 }
88209
88210-static inline void verify_mm_writelocked(struct mm_struct *mm)
88211-{
88212-#ifdef CONFIG_DEBUG_VM
88213- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
88214- WARN_ON(1);
88215- up_read(&mm->mmap_sem);
88216- }
88217-#endif
88218-}
88219-
88220 /*
88221 * this is really a simplified "do_mmap". it only handles
88222 * anonymous maps. eventually we may be able to do some
88223@@ -2611,6 +3093,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88224 struct rb_node ** rb_link, * rb_parent;
88225 pgoff_t pgoff = addr >> PAGE_SHIFT;
88226 int error;
88227+ unsigned long charged;
88228
88229 len = PAGE_ALIGN(len);
88230 if (!len)
88231@@ -2618,16 +3101,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88232
88233 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
88234
88235+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
88236+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
88237+ flags &= ~VM_EXEC;
88238+
88239+#ifdef CONFIG_PAX_MPROTECT
88240+ if (mm->pax_flags & MF_PAX_MPROTECT)
88241+ flags &= ~VM_MAYEXEC;
88242+#endif
88243+
88244+ }
88245+#endif
88246+
88247 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
88248 if (error & ~PAGE_MASK)
88249 return error;
88250
88251+ charged = len >> PAGE_SHIFT;
88252+
88253 /*
88254 * mlock MCL_FUTURE?
88255 */
88256 if (mm->def_flags & VM_LOCKED) {
88257 unsigned long locked, lock_limit;
88258- locked = len >> PAGE_SHIFT;
88259+ locked = charged;
88260 locked += mm->locked_vm;
88261 lock_limit = rlimit(RLIMIT_MEMLOCK);
88262 lock_limit >>= PAGE_SHIFT;
88263@@ -2644,21 +3141,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88264 /*
88265 * Clear old maps. this also does some error checking for us
88266 */
88267- munmap_back:
88268 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
88269 if (do_munmap(mm, addr, len))
88270 return -ENOMEM;
88271- goto munmap_back;
88272+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
88273 }
88274
88275 /* Check against address space limits *after* clearing old maps... */
88276- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
88277+ if (!may_expand_vm(mm, charged))
88278 return -ENOMEM;
88279
88280 if (mm->map_count > sysctl_max_map_count)
88281 return -ENOMEM;
88282
88283- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
88284+ if (security_vm_enough_memory_mm(mm, charged))
88285 return -ENOMEM;
88286
88287 /* Can we just expand an old private anonymous mapping? */
88288@@ -2672,7 +3168,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88289 */
88290 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88291 if (!vma) {
88292- vm_unacct_memory(len >> PAGE_SHIFT);
88293+ vm_unacct_memory(charged);
88294 return -ENOMEM;
88295 }
88296
88297@@ -2686,9 +3182,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88298 vma_link(mm, vma, prev, rb_link, rb_parent);
88299 out:
88300 perf_event_mmap(vma);
88301- mm->total_vm += len >> PAGE_SHIFT;
88302+ mm->total_vm += charged;
88303 if (flags & VM_LOCKED)
88304- mm->locked_vm += (len >> PAGE_SHIFT);
88305+ mm->locked_vm += charged;
88306+ track_exec_limit(mm, addr, addr + len, flags);
88307 return addr;
88308 }
88309
88310@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
88311 while (vma) {
88312 if (vma->vm_flags & VM_ACCOUNT)
88313 nr_accounted += vma_pages(vma);
88314+ vma->vm_mirror = NULL;
88315 vma = remove_vma(vma);
88316 }
88317 vm_unacct_memory(nr_accounted);
88318@@ -2766,6 +3264,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
88319 struct vm_area_struct *prev;
88320 struct rb_node **rb_link, *rb_parent;
88321
88322+#ifdef CONFIG_PAX_SEGMEXEC
88323+ struct vm_area_struct *vma_m = NULL;
88324+#endif
88325+
88326+ if (security_mmap_addr(vma->vm_start))
88327+ return -EPERM;
88328+
88329 /*
88330 * The vm_pgoff of a purely anonymous vma should be irrelevant
88331 * until its first write fault, when page's anon_vma and index
88332@@ -2789,7 +3294,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
88333 security_vm_enough_memory_mm(mm, vma_pages(vma)))
88334 return -ENOMEM;
88335
88336+#ifdef CONFIG_PAX_SEGMEXEC
88337+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
88338+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88339+ if (!vma_m)
88340+ return -ENOMEM;
88341+ }
88342+#endif
88343+
88344 vma_link(mm, vma, prev, rb_link, rb_parent);
88345+
88346+#ifdef CONFIG_PAX_SEGMEXEC
88347+ if (vma_m)
88348+ BUG_ON(pax_mirror_vma(vma_m, vma));
88349+#endif
88350+
88351 return 0;
88352 }
88353
88354@@ -2809,6 +3328,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
88355 struct mempolicy *pol;
88356 bool faulted_in_anon_vma = true;
88357
88358+ BUG_ON(vma->vm_mirror);
88359+
88360 /*
88361 * If anonymous vma has not yet been faulted, update new pgoff
88362 * to match new location, to increase its chance of merging.
88363@@ -2875,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
88364 return NULL;
88365 }
88366
88367+#ifdef CONFIG_PAX_SEGMEXEC
88368+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
88369+{
88370+ struct vm_area_struct *prev_m;
88371+ struct rb_node **rb_link_m, *rb_parent_m;
88372+ struct mempolicy *pol_m;
88373+
88374+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
88375+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
88376+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
88377+ *vma_m = *vma;
88378+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
88379+ if (anon_vma_clone(vma_m, vma))
88380+ return -ENOMEM;
88381+ pol_m = vma_policy(vma_m);
88382+ mpol_get(pol_m);
88383+ vma_set_policy(vma_m, pol_m);
88384+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
88385+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
88386+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
88387+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
88388+ if (vma_m->vm_file)
88389+ get_file(vma_m->vm_file);
88390+ if (vma_m->vm_ops && vma_m->vm_ops->open)
88391+ vma_m->vm_ops->open(vma_m);
88392+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
88393+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
88394+ vma_m->vm_mirror = vma;
88395+ vma->vm_mirror = vma_m;
88396+ return 0;
88397+}
88398+#endif
88399+
88400 /*
88401 * Return true if the calling process may expand its vm space by the passed
88402 * number of pages
88403@@ -2886,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
88404
88405 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
88406
88407+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
88408 if (cur + npages > lim)
88409 return 0;
88410 return 1;
88411@@ -2956,6 +3511,22 @@ int install_special_mapping(struct mm_struct *mm,
88412 vma->vm_start = addr;
88413 vma->vm_end = addr + len;
88414
88415+#ifdef CONFIG_PAX_MPROTECT
88416+ if (mm->pax_flags & MF_PAX_MPROTECT) {
88417+#ifndef CONFIG_PAX_MPROTECT_COMPAT
88418+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
88419+ return -EPERM;
88420+ if (!(vm_flags & VM_EXEC))
88421+ vm_flags &= ~VM_MAYEXEC;
88422+#else
88423+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
88424+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
88425+#endif
88426+ else
88427+ vm_flags &= ~VM_MAYWRITE;
88428+ }
88429+#endif
88430+
88431 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
88432 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
88433
88434diff --git a/mm/mprotect.c b/mm/mprotect.c
88435index 94722a4..e661e29 100644
88436--- a/mm/mprotect.c
88437+++ b/mm/mprotect.c
88438@@ -23,10 +23,18 @@
88439 #include <linux/mmu_notifier.h>
88440 #include <linux/migrate.h>
88441 #include <linux/perf_event.h>
88442+#include <linux/sched/sysctl.h>
88443+
88444+#ifdef CONFIG_PAX_MPROTECT
88445+#include <linux/elf.h>
88446+#include <linux/binfmts.h>
88447+#endif
88448+
88449 #include <asm/uaccess.h>
88450 #include <asm/pgtable.h>
88451 #include <asm/cacheflush.h>
88452 #include <asm/tlbflush.h>
88453+#include <asm/mmu_context.h>
88454
88455 #ifndef pgprot_modify
88456 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
88457@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
88458 return pages;
88459 }
88460
88461+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
88462+/* called while holding the mmap semaphor for writing except stack expansion */
88463+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
88464+{
88465+ unsigned long oldlimit, newlimit = 0UL;
88466+
88467+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
88468+ return;
88469+
88470+ spin_lock(&mm->page_table_lock);
88471+ oldlimit = mm->context.user_cs_limit;
88472+ if ((prot & VM_EXEC) && oldlimit < end)
88473+ /* USER_CS limit moved up */
88474+ newlimit = end;
88475+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
88476+ /* USER_CS limit moved down */
88477+ newlimit = start;
88478+
88479+ if (newlimit) {
88480+ mm->context.user_cs_limit = newlimit;
88481+
88482+#ifdef CONFIG_SMP
88483+ wmb();
88484+ cpus_clear(mm->context.cpu_user_cs_mask);
88485+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
88486+#endif
88487+
88488+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
88489+ }
88490+ spin_unlock(&mm->page_table_lock);
88491+ if (newlimit == end) {
88492+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
88493+
88494+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
88495+ if (is_vm_hugetlb_page(vma))
88496+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
88497+ else
88498+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
88499+ }
88500+}
88501+#endif
88502+
88503 int
88504 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88505 unsigned long start, unsigned long end, unsigned long newflags)
88506@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88507 int error;
88508 int dirty_accountable = 0;
88509
88510+#ifdef CONFIG_PAX_SEGMEXEC
88511+ struct vm_area_struct *vma_m = NULL;
88512+ unsigned long start_m, end_m;
88513+
88514+ start_m = start + SEGMEXEC_TASK_SIZE;
88515+ end_m = end + SEGMEXEC_TASK_SIZE;
88516+#endif
88517+
88518 if (newflags == oldflags) {
88519 *pprev = vma;
88520 return 0;
88521 }
88522
88523+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
88524+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
88525+
88526+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
88527+ return -ENOMEM;
88528+
88529+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
88530+ return -ENOMEM;
88531+ }
88532+
88533 /*
88534 * If we make a private mapping writable we increase our commit;
88535 * but (without finer accounting) cannot reduce our commit if we
88536@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88537 }
88538 }
88539
88540+#ifdef CONFIG_PAX_SEGMEXEC
88541+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
88542+ if (start != vma->vm_start) {
88543+ error = split_vma(mm, vma, start, 1);
88544+ if (error)
88545+ goto fail;
88546+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
88547+ *pprev = (*pprev)->vm_next;
88548+ }
88549+
88550+ if (end != vma->vm_end) {
88551+ error = split_vma(mm, vma, end, 0);
88552+ if (error)
88553+ goto fail;
88554+ }
88555+
88556+ if (pax_find_mirror_vma(vma)) {
88557+ error = __do_munmap(mm, start_m, end_m - start_m);
88558+ if (error)
88559+ goto fail;
88560+ } else {
88561+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88562+ if (!vma_m) {
88563+ error = -ENOMEM;
88564+ goto fail;
88565+ }
88566+ vma->vm_flags = newflags;
88567+ error = pax_mirror_vma(vma_m, vma);
88568+ if (error) {
88569+ vma->vm_flags = oldflags;
88570+ goto fail;
88571+ }
88572+ }
88573+ }
88574+#endif
88575+
88576 /*
88577 * First try to merge with previous and/or next vma.
88578 */
88579@@ -296,9 +400,21 @@ success:
88580 * vm_flags and vm_page_prot are protected by the mmap_sem
88581 * held in write mode.
88582 */
88583+
88584+#ifdef CONFIG_PAX_SEGMEXEC
88585+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
88586+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
88587+#endif
88588+
88589 vma->vm_flags = newflags;
88590+
88591+#ifdef CONFIG_PAX_MPROTECT
88592+ if (mm->binfmt && mm->binfmt->handle_mprotect)
88593+ mm->binfmt->handle_mprotect(vma, newflags);
88594+#endif
88595+
88596 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
88597- vm_get_page_prot(newflags));
88598+ vm_get_page_prot(vma->vm_flags));
88599
88600 if (vma_wants_writenotify(vma)) {
88601 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
88602@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88603 end = start + len;
88604 if (end <= start)
88605 return -ENOMEM;
88606+
88607+#ifdef CONFIG_PAX_SEGMEXEC
88608+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
88609+ if (end > SEGMEXEC_TASK_SIZE)
88610+ return -EINVAL;
88611+ } else
88612+#endif
88613+
88614+ if (end > TASK_SIZE)
88615+ return -EINVAL;
88616+
88617 if (!arch_validate_prot(prot))
88618 return -EINVAL;
88619
88620@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88621 /*
88622 * Does the application expect PROT_READ to imply PROT_EXEC:
88623 */
88624- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
88625+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
88626 prot |= PROT_EXEC;
88627
88628 vm_flags = calc_vm_prot_bits(prot);
88629@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88630 if (start > vma->vm_start)
88631 prev = vma;
88632
88633+#ifdef CONFIG_PAX_MPROTECT
88634+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
88635+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
88636+#endif
88637+
88638 for (nstart = start ; ; ) {
88639 unsigned long newflags;
88640
88641@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88642
88643 /* newflags >> 4 shift VM_MAY% in place of VM_% */
88644 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
88645+ if (prot & (PROT_WRITE | PROT_EXEC))
88646+ gr_log_rwxmprotect(vma);
88647+
88648+ error = -EACCES;
88649+ goto out;
88650+ }
88651+
88652+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
88653 error = -EACCES;
88654 goto out;
88655 }
88656@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88657 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
88658 if (error)
88659 goto out;
88660+
88661+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
88662+
88663 nstart = tmp;
88664
88665 if (nstart < prev->vm_end)
88666diff --git a/mm/mremap.c b/mm/mremap.c
88667index 463a257..c0c7a92 100644
88668--- a/mm/mremap.c
88669+++ b/mm/mremap.c
88670@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
88671 continue;
88672 pte = ptep_get_and_clear(mm, old_addr, old_pte);
88673 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
88674+
88675+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
88676+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
88677+ pte = pte_exprotect(pte);
88678+#endif
88679+
88680 set_pte_at(mm, new_addr, new_pte, pte);
88681 }
88682
88683@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
88684 if (is_vm_hugetlb_page(vma))
88685 goto Einval;
88686
88687+#ifdef CONFIG_PAX_SEGMEXEC
88688+ if (pax_find_mirror_vma(vma))
88689+ goto Einval;
88690+#endif
88691+
88692 /* We can't remap across vm area boundaries */
88693 if (old_len > vma->vm_end - addr)
88694 goto Efault;
88695@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
88696 unsigned long ret = -EINVAL;
88697 unsigned long charged = 0;
88698 unsigned long map_flags;
88699+ unsigned long pax_task_size = TASK_SIZE;
88700
88701 if (new_addr & ~PAGE_MASK)
88702 goto out;
88703
88704- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
88705+#ifdef CONFIG_PAX_SEGMEXEC
88706+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
88707+ pax_task_size = SEGMEXEC_TASK_SIZE;
88708+#endif
88709+
88710+ pax_task_size -= PAGE_SIZE;
88711+
88712+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
88713 goto out;
88714
88715 /* Check if the location we're moving into overlaps the
88716 * old location at all, and fail if it does.
88717 */
88718- if ((new_addr <= addr) && (new_addr+new_len) > addr)
88719- goto out;
88720-
88721- if ((addr <= new_addr) && (addr+old_len) > new_addr)
88722+ if (addr + old_len > new_addr && new_addr + new_len > addr)
88723 goto out;
88724
88725 ret = do_munmap(mm, new_addr, new_len);
88726@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88727 unsigned long ret = -EINVAL;
88728 unsigned long charged = 0;
88729 bool locked = false;
88730+ unsigned long pax_task_size = TASK_SIZE;
88731
88732 down_write(&current->mm->mmap_sem);
88733
88734@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88735 if (!new_len)
88736 goto out;
88737
88738+#ifdef CONFIG_PAX_SEGMEXEC
88739+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
88740+ pax_task_size = SEGMEXEC_TASK_SIZE;
88741+#endif
88742+
88743+ pax_task_size -= PAGE_SIZE;
88744+
88745+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
88746+ old_len > pax_task_size || addr > pax_task_size-old_len)
88747+ goto out;
88748+
88749 if (flags & MREMAP_FIXED) {
88750 if (flags & MREMAP_MAYMOVE)
88751 ret = mremap_to(addr, old_len, new_addr, new_len,
88752@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88753 new_addr = addr;
88754 }
88755 ret = addr;
88756+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
88757 goto out;
88758 }
88759 }
88760@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88761 goto out;
88762 }
88763
88764+ map_flags = vma->vm_flags;
88765 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
88766+ if (!(ret & ~PAGE_MASK)) {
88767+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
88768+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
88769+ }
88770 }
88771 out:
88772 if (ret & ~PAGE_MASK)
88773diff --git a/mm/nommu.c b/mm/nommu.c
88774index 298884d..5f74980 100644
88775--- a/mm/nommu.c
88776+++ b/mm/nommu.c
88777@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
88778 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
88779 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
88780 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
88781-int heap_stack_gap = 0;
88782
88783 atomic_long_t mmap_pages_allocated;
88784
88785@@ -842,15 +841,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
88786 EXPORT_SYMBOL(find_vma);
88787
88788 /*
88789- * find a VMA
88790- * - we don't extend stack VMAs under NOMMU conditions
88791- */
88792-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
88793-{
88794- return find_vma(mm, addr);
88795-}
88796-
88797-/*
88798 * expand a stack to a given address
88799 * - not supported under NOMMU conditions
88800 */
88801@@ -1561,6 +1551,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88802
88803 /* most fields are the same, copy all, and then fixup */
88804 *new = *vma;
88805+ INIT_LIST_HEAD(&new->anon_vma_chain);
88806 *region = *vma->vm_region;
88807 new->vm_region = region;
88808
88809@@ -1995,8 +1986,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
88810 }
88811 EXPORT_SYMBOL(generic_file_remap_pages);
88812
88813-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88814- unsigned long addr, void *buf, int len, int write)
88815+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88816+ unsigned long addr, void *buf, size_t len, int write)
88817 {
88818 struct vm_area_struct *vma;
88819
88820@@ -2037,8 +2028,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88821 *
88822 * The caller must hold a reference on @mm.
88823 */
88824-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
88825- void *buf, int len, int write)
88826+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
88827+ void *buf, size_t len, int write)
88828 {
88829 return __access_remote_vm(NULL, mm, addr, buf, len, write);
88830 }
88831@@ -2047,7 +2038,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
88832 * Access another process' address space.
88833 * - source/target buffer must be kernel space
88834 */
88835-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
88836+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
88837 {
88838 struct mm_struct *mm;
88839
88840diff --git a/mm/page-writeback.c b/mm/page-writeback.c
88841index 4514ad7..92eaa1c 100644
88842--- a/mm/page-writeback.c
88843+++ b/mm/page-writeback.c
88844@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
88845 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
88846 * - the bdi dirty thresh drops quickly due to change of JBOD workload
88847 */
88848-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
88849+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
88850 unsigned long thresh,
88851 unsigned long bg_thresh,
88852 unsigned long dirty,
88853@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
88854 }
88855 }
88856
88857-static struct notifier_block __cpuinitdata ratelimit_nb = {
88858+static struct notifier_block ratelimit_nb = {
88859 .notifier_call = ratelimit_handler,
88860 .next = NULL,
88861 };
88862diff --git a/mm/page_alloc.c b/mm/page_alloc.c
88863index 2ee0fd3..6e2edfb 100644
88864--- a/mm/page_alloc.c
88865+++ b/mm/page_alloc.c
88866@@ -60,6 +60,7 @@
88867 #include <linux/page-debug-flags.h>
88868 #include <linux/hugetlb.h>
88869 #include <linux/sched/rt.h>
88870+#include <linux/random.h>
88871
88872 #include <asm/tlbflush.h>
88873 #include <asm/div64.h>
88874@@ -345,7 +346,7 @@ out:
88875 * This usage means that zero-order pages may not be compound.
88876 */
88877
88878-static void free_compound_page(struct page *page)
88879+void free_compound_page(struct page *page)
88880 {
88881 __free_pages_ok(page, compound_order(page));
88882 }
88883@@ -702,6 +703,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
88884 int i;
88885 int bad = 0;
88886
88887+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88888+ unsigned long index = 1UL << order;
88889+#endif
88890+
88891 trace_mm_page_free(page, order);
88892 kmemcheck_free_shadow(page, order);
88893
88894@@ -717,6 +722,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
88895 debug_check_no_obj_freed(page_address(page),
88896 PAGE_SIZE << order);
88897 }
88898+
88899+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88900+ for (; index; --index)
88901+ sanitize_highpage(page + index - 1);
88902+#endif
88903+
88904 arch_free_page(page, order);
88905 kernel_map_pages(page, 1 << order, 0);
88906
88907@@ -739,6 +750,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
88908 local_irq_restore(flags);
88909 }
88910
88911+#ifdef CONFIG_PAX_LATENT_ENTROPY
88912+bool __meminitdata extra_latent_entropy;
88913+
88914+static int __init setup_pax_extra_latent_entropy(char *str)
88915+{
88916+ extra_latent_entropy = true;
88917+ return 0;
88918+}
88919+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
88920+
88921+volatile u64 latent_entropy;
88922+#endif
88923+
88924 /*
88925 * Read access to zone->managed_pages is safe because it's unsigned long,
88926 * but we still need to serialize writers. Currently all callers of
88927@@ -761,6 +785,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
88928 set_page_count(p, 0);
88929 }
88930
88931+#ifdef CONFIG_PAX_LATENT_ENTROPY
88932+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
88933+ u64 hash = 0;
88934+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
88935+ const u64 *data = lowmem_page_address(page);
88936+
88937+ for (index = 0; index < end; index++)
88938+ hash ^= hash + data[index];
88939+ latent_entropy ^= hash;
88940+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
88941+ }
88942+#endif
88943+
88944 page_zone(page)->managed_pages += 1 << order;
88945 set_page_refcounted(page);
88946 __free_pages(page, order);
88947@@ -870,8 +907,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
88948 arch_alloc_page(page, order);
88949 kernel_map_pages(page, 1 << order, 1);
88950
88951+#ifndef CONFIG_PAX_MEMORY_SANITIZE
88952 if (gfp_flags & __GFP_ZERO)
88953 prep_zero_page(page, order, gfp_flags);
88954+#endif
88955
88956 if (order && (gfp_flags & __GFP_COMP))
88957 prep_compound_page(page, order);
88958diff --git a/mm/page_io.c b/mm/page_io.c
88959index a8a3ef4..7260a60 100644
88960--- a/mm/page_io.c
88961+++ b/mm/page_io.c
88962@@ -214,7 +214,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
88963 struct file *swap_file = sis->swap_file;
88964 struct address_space *mapping = swap_file->f_mapping;
88965 struct iovec iov = {
88966- .iov_base = kmap(page),
88967+ .iov_base = (void __force_user *)kmap(page),
88968 .iov_len = PAGE_SIZE,
88969 };
88970
88971diff --git a/mm/percpu.c b/mm/percpu.c
88972index 8c8e08f..73a5cda 100644
88973--- a/mm/percpu.c
88974+++ b/mm/percpu.c
88975@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
88976 static unsigned int pcpu_high_unit_cpu __read_mostly;
88977
88978 /* the address of the first chunk which starts with the kernel static area */
88979-void *pcpu_base_addr __read_mostly;
88980+void *pcpu_base_addr __read_only;
88981 EXPORT_SYMBOL_GPL(pcpu_base_addr);
88982
88983 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
88984diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
88985index fd26d04..0cea1b0 100644
88986--- a/mm/process_vm_access.c
88987+++ b/mm/process_vm_access.c
88988@@ -13,6 +13,7 @@
88989 #include <linux/uio.h>
88990 #include <linux/sched.h>
88991 #include <linux/highmem.h>
88992+#include <linux/security.h>
88993 #include <linux/ptrace.h>
88994 #include <linux/slab.h>
88995 #include <linux/syscalls.h>
88996@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
88997 size_t iov_l_curr_offset = 0;
88998 ssize_t iov_len;
88999
89000+ return -ENOSYS; // PaX: until properly audited
89001+
89002 /*
89003 * Work out how many pages of struct pages we're going to need
89004 * when eventually calling get_user_pages
89005 */
89006 for (i = 0; i < riovcnt; i++) {
89007 iov_len = rvec[i].iov_len;
89008- if (iov_len > 0) {
89009- nr_pages_iov = ((unsigned long)rvec[i].iov_base
89010- + iov_len)
89011- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
89012- / PAGE_SIZE + 1;
89013- nr_pages = max(nr_pages, nr_pages_iov);
89014- }
89015+ if (iov_len <= 0)
89016+ continue;
89017+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
89018+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
89019+ nr_pages = max(nr_pages, nr_pages_iov);
89020 }
89021
89022 if (nr_pages == 0)
89023@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
89024 goto free_proc_pages;
89025 }
89026
89027+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
89028+ rc = -EPERM;
89029+ goto put_task_struct;
89030+ }
89031+
89032 mm = mm_access(task, PTRACE_MODE_ATTACH);
89033 if (!mm || IS_ERR(mm)) {
89034 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
89035diff --git a/mm/rmap.c b/mm/rmap.c
89036index 6280da8..b5c090e 100644
89037--- a/mm/rmap.c
89038+++ b/mm/rmap.c
89039@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89040 struct anon_vma *anon_vma = vma->anon_vma;
89041 struct anon_vma_chain *avc;
89042
89043+#ifdef CONFIG_PAX_SEGMEXEC
89044+ struct anon_vma_chain *avc_m = NULL;
89045+#endif
89046+
89047 might_sleep();
89048 if (unlikely(!anon_vma)) {
89049 struct mm_struct *mm = vma->vm_mm;
89050@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89051 if (!avc)
89052 goto out_enomem;
89053
89054+#ifdef CONFIG_PAX_SEGMEXEC
89055+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
89056+ if (!avc_m)
89057+ goto out_enomem_free_avc;
89058+#endif
89059+
89060 anon_vma = find_mergeable_anon_vma(vma);
89061 allocated = NULL;
89062 if (!anon_vma) {
89063@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89064 /* page_table_lock to protect against threads */
89065 spin_lock(&mm->page_table_lock);
89066 if (likely(!vma->anon_vma)) {
89067+
89068+#ifdef CONFIG_PAX_SEGMEXEC
89069+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
89070+
89071+ if (vma_m) {
89072+ BUG_ON(vma_m->anon_vma);
89073+ vma_m->anon_vma = anon_vma;
89074+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
89075+ avc_m = NULL;
89076+ }
89077+#endif
89078+
89079 vma->anon_vma = anon_vma;
89080 anon_vma_chain_link(vma, avc, anon_vma);
89081 allocated = NULL;
89082@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89083
89084 if (unlikely(allocated))
89085 put_anon_vma(allocated);
89086+
89087+#ifdef CONFIG_PAX_SEGMEXEC
89088+ if (unlikely(avc_m))
89089+ anon_vma_chain_free(avc_m);
89090+#endif
89091+
89092 if (unlikely(avc))
89093 anon_vma_chain_free(avc);
89094 }
89095 return 0;
89096
89097 out_enomem_free_avc:
89098+
89099+#ifdef CONFIG_PAX_SEGMEXEC
89100+ if (avc_m)
89101+ anon_vma_chain_free(avc_m);
89102+#endif
89103+
89104 anon_vma_chain_free(avc);
89105 out_enomem:
89106 return -ENOMEM;
89107@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
89108 * Attach the anon_vmas from src to dst.
89109 * Returns 0 on success, -ENOMEM on failure.
89110 */
89111-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
89112+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
89113 {
89114 struct anon_vma_chain *avc, *pavc;
89115 struct anon_vma *root = NULL;
89116@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
89117 * the corresponding VMA in the parent process is attached to.
89118 * Returns 0 on success, non-zero on failure.
89119 */
89120-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
89121+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
89122 {
89123 struct anon_vma_chain *avc;
89124 struct anon_vma *anon_vma;
89125@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
89126 void __init anon_vma_init(void)
89127 {
89128 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
89129- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
89130- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
89131+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
89132+ anon_vma_ctor);
89133+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
89134+ SLAB_PANIC|SLAB_NO_SANITIZE);
89135 }
89136
89137 /*
89138diff --git a/mm/shmem.c b/mm/shmem.c
89139index 5e6a842..b41916e 100644
89140--- a/mm/shmem.c
89141+++ b/mm/shmem.c
89142@@ -33,7 +33,7 @@
89143 #include <linux/swap.h>
89144 #include <linux/aio.h>
89145
89146-static struct vfsmount *shm_mnt;
89147+struct vfsmount *shm_mnt;
89148
89149 #ifdef CONFIG_SHMEM
89150 /*
89151@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
89152 #define BOGO_DIRENT_SIZE 20
89153
89154 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
89155-#define SHORT_SYMLINK_LEN 128
89156+#define SHORT_SYMLINK_LEN 64
89157
89158 /*
89159 * shmem_fallocate and shmem_writepage communicate via inode->i_private
89160@@ -2203,6 +2203,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
89161 static int shmem_xattr_validate(const char *name)
89162 {
89163 struct { const char *prefix; size_t len; } arr[] = {
89164+
89165+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
89166+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
89167+#endif
89168+
89169 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
89170 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
89171 };
89172@@ -2258,6 +2263,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
89173 if (err)
89174 return err;
89175
89176+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
89177+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
89178+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
89179+ return -EOPNOTSUPP;
89180+ if (size > 8)
89181+ return -EINVAL;
89182+ }
89183+#endif
89184+
89185 return simple_xattr_set(&info->xattrs, name, value, size, flags);
89186 }
89187
89188@@ -2570,8 +2584,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
89189 int err = -ENOMEM;
89190
89191 /* Round up to L1_CACHE_BYTES to resist false sharing */
89192- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
89193- L1_CACHE_BYTES), GFP_KERNEL);
89194+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
89195 if (!sbinfo)
89196 return -ENOMEM;
89197
89198diff --git a/mm/slab.c b/mm/slab.c
89199index bd88411..2d46fd6 100644
89200--- a/mm/slab.c
89201+++ b/mm/slab.c
89202@@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
89203 if ((x)->max_freeable < i) \
89204 (x)->max_freeable = i; \
89205 } while (0)
89206-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
89207-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
89208-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
89209-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
89210+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
89211+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
89212+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
89213+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
89214+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
89215+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
89216 #else
89217 #define STATS_INC_ACTIVE(x) do { } while (0)
89218 #define STATS_DEC_ACTIVE(x) do { } while (0)
89219@@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
89220 #define STATS_INC_ALLOCMISS(x) do { } while (0)
89221 #define STATS_INC_FREEHIT(x) do { } while (0)
89222 #define STATS_INC_FREEMISS(x) do { } while (0)
89223+#define STATS_INC_SANITIZED(x) do { } while (0)
89224+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
89225 #endif
89226
89227 #if DEBUG
89228@@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
89229 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
89230 */
89231 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
89232- const struct slab *slab, void *obj)
89233+ const struct slab *slab, const void *obj)
89234 {
89235 u32 offset = (obj - slab->s_mem);
89236 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
89237@@ -1384,7 +1388,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
89238 return notifier_from_errno(err);
89239 }
89240
89241-static struct notifier_block __cpuinitdata cpucache_notifier = {
89242+static struct notifier_block cpucache_notifier = {
89243 &cpuup_callback, NULL, 0
89244 };
89245
89246@@ -1565,12 +1569,12 @@ void __init kmem_cache_init(void)
89247 */
89248
89249 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
89250- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
89251+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
89252
89253 if (INDEX_AC != INDEX_NODE)
89254 kmalloc_caches[INDEX_NODE] =
89255 create_kmalloc_cache("kmalloc-node",
89256- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
89257+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
89258
89259 slab_early_init = 0;
89260
89261@@ -3583,6 +3587,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
89262 struct array_cache *ac = cpu_cache_get(cachep);
89263
89264 check_irq_off();
89265+
89266+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89267+ if (pax_sanitize_slab) {
89268+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
89269+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
89270+
89271+ if (cachep->ctor)
89272+ cachep->ctor(objp);
89273+
89274+ STATS_INC_SANITIZED(cachep);
89275+ } else
89276+ STATS_INC_NOT_SANITIZED(cachep);
89277+ }
89278+#endif
89279+
89280 kmemleak_free_recursive(objp, cachep->flags);
89281 objp = cache_free_debugcheck(cachep, objp, caller);
89282
89283@@ -3800,6 +3819,7 @@ void kfree(const void *objp)
89284
89285 if (unlikely(ZERO_OR_NULL_PTR(objp)))
89286 return;
89287+ VM_BUG_ON(!virt_addr_valid(objp));
89288 local_irq_save(flags);
89289 kfree_debugcheck(objp);
89290 c = virt_to_cache(objp);
89291@@ -4241,14 +4261,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
89292 }
89293 /* cpu stats */
89294 {
89295- unsigned long allochit = atomic_read(&cachep->allochit);
89296- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
89297- unsigned long freehit = atomic_read(&cachep->freehit);
89298- unsigned long freemiss = atomic_read(&cachep->freemiss);
89299+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
89300+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
89301+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
89302+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
89303
89304 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
89305 allochit, allocmiss, freehit, freemiss);
89306 }
89307+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89308+ {
89309+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
89310+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
89311+
89312+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
89313+ }
89314+#endif
89315 #endif
89316 }
89317
89318@@ -4476,13 +4504,71 @@ static const struct file_operations proc_slabstats_operations = {
89319 static int __init slab_proc_init(void)
89320 {
89321 #ifdef CONFIG_DEBUG_SLAB_LEAK
89322- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
89323+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
89324 #endif
89325 return 0;
89326 }
89327 module_init(slab_proc_init);
89328 #endif
89329
89330+bool is_usercopy_object(const void *ptr)
89331+{
89332+ struct page *page;
89333+ struct kmem_cache *cachep;
89334+
89335+ if (ZERO_OR_NULL_PTR(ptr))
89336+ return false;
89337+
89338+ if (!slab_is_available())
89339+ return false;
89340+
89341+ if (!virt_addr_valid(ptr))
89342+ return false;
89343+
89344+ page = virt_to_head_page(ptr);
89345+
89346+ if (!PageSlab(page))
89347+ return false;
89348+
89349+ cachep = page->slab_cache;
89350+ return cachep->flags & SLAB_USERCOPY;
89351+}
89352+
89353+#ifdef CONFIG_PAX_USERCOPY
89354+const char *check_heap_object(const void *ptr, unsigned long n)
89355+{
89356+ struct page *page;
89357+ struct kmem_cache *cachep;
89358+ struct slab *slabp;
89359+ unsigned int objnr;
89360+ unsigned long offset;
89361+
89362+ if (ZERO_OR_NULL_PTR(ptr))
89363+ return "<null>";
89364+
89365+ if (!virt_addr_valid(ptr))
89366+ return NULL;
89367+
89368+ page = virt_to_head_page(ptr);
89369+
89370+ if (!PageSlab(page))
89371+ return NULL;
89372+
89373+ cachep = page->slab_cache;
89374+ if (!(cachep->flags & SLAB_USERCOPY))
89375+ return cachep->name;
89376+
89377+ slabp = page->slab_page;
89378+ objnr = obj_to_index(cachep, slabp, ptr);
89379+ BUG_ON(objnr >= cachep->num);
89380+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
89381+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
89382+ return NULL;
89383+
89384+ return cachep->name;
89385+}
89386+#endif
89387+
89388 /**
89389 * ksize - get the actual amount of memory allocated for a given object
89390 * @objp: Pointer to the object
89391diff --git a/mm/slab.h b/mm/slab.h
89392index f96b49e..db1d204 100644
89393--- a/mm/slab.h
89394+++ b/mm/slab.h
89395@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
89396 /* The slab cache that manages slab cache information */
89397 extern struct kmem_cache *kmem_cache;
89398
89399+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89400+#ifdef CONFIG_X86_64
89401+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
89402+#else
89403+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
89404+#endif
89405+extern bool pax_sanitize_slab;
89406+#endif
89407+
89408 unsigned long calculate_alignment(unsigned long flags,
89409 unsigned long align, unsigned long size);
89410
89411@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
89412
89413 /* Legal flag mask for kmem_cache_create(), for various configurations */
89414 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
89415- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
89416+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
89417+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
89418
89419 #if defined(CONFIG_DEBUG_SLAB)
89420 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
89421@@ -229,6 +239,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
89422 return s;
89423
89424 page = virt_to_head_page(x);
89425+
89426+ BUG_ON(!PageSlab(page));
89427+
89428 cachep = page->slab_cache;
89429 if (slab_equal_or_root(cachep, s))
89430 return cachep;
89431diff --git a/mm/slab_common.c b/mm/slab_common.c
89432index 2d41450..4efe6ee 100644
89433--- a/mm/slab_common.c
89434+++ b/mm/slab_common.c
89435@@ -22,11 +22,22 @@
89436
89437 #include "slab.h"
89438
89439-enum slab_state slab_state;
89440+enum slab_state slab_state __read_only;
89441 LIST_HEAD(slab_caches);
89442 DEFINE_MUTEX(slab_mutex);
89443 struct kmem_cache *kmem_cache;
89444
89445+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89446+bool pax_sanitize_slab __read_only = true;
89447+static int __init pax_sanitize_slab_setup(char *str)
89448+{
89449+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
89450+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
89451+ return 1;
89452+}
89453+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
89454+#endif
89455+
89456 #ifdef CONFIG_DEBUG_VM
89457 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
89458 size_t size)
89459@@ -209,7 +220,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
89460
89461 err = __kmem_cache_create(s, flags);
89462 if (!err) {
89463- s->refcount = 1;
89464+ atomic_set(&s->refcount, 1);
89465 list_add(&s->list, &slab_caches);
89466 memcg_cache_list_add(memcg, s);
89467 } else {
89468@@ -255,8 +266,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
89469
89470 get_online_cpus();
89471 mutex_lock(&slab_mutex);
89472- s->refcount--;
89473- if (!s->refcount) {
89474+ if (atomic_dec_and_test(&s->refcount)) {
89475 list_del(&s->list);
89476
89477 if (!__kmem_cache_shutdown(s)) {
89478@@ -302,7 +312,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
89479 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
89480 name, size, err);
89481
89482- s->refcount = -1; /* Exempt from merging for now */
89483+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
89484 }
89485
89486 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
89487@@ -315,7 +325,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
89488
89489 create_boot_cache(s, name, size, flags);
89490 list_add(&s->list, &slab_caches);
89491- s->refcount = 1;
89492+ atomic_set(&s->refcount, 1);
89493 return s;
89494 }
89495
89496@@ -327,6 +337,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
89497 EXPORT_SYMBOL(kmalloc_dma_caches);
89498 #endif
89499
89500+#ifdef CONFIG_PAX_USERCOPY_SLABS
89501+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
89502+EXPORT_SYMBOL(kmalloc_usercopy_caches);
89503+#endif
89504+
89505 /*
89506 * Conversion table for small slabs sizes / 8 to the index in the
89507 * kmalloc array. This is necessary for slabs < 192 since we have non power
89508@@ -391,6 +406,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
89509 return kmalloc_dma_caches[index];
89510
89511 #endif
89512+
89513+#ifdef CONFIG_PAX_USERCOPY_SLABS
89514+ if (unlikely((flags & GFP_USERCOPY)))
89515+ return kmalloc_usercopy_caches[index];
89516+
89517+#endif
89518+
89519 return kmalloc_caches[index];
89520 }
89521
89522@@ -447,7 +469,7 @@ void __init create_kmalloc_caches(unsigned long flags)
89523 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
89524 if (!kmalloc_caches[i]) {
89525 kmalloc_caches[i] = create_kmalloc_cache(NULL,
89526- 1 << i, flags);
89527+ 1 << i, SLAB_USERCOPY | flags);
89528 }
89529
89530 /*
89531@@ -456,10 +478,10 @@ void __init create_kmalloc_caches(unsigned long flags)
89532 * earlier power of two caches
89533 */
89534 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
89535- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
89536+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
89537
89538 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
89539- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
89540+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
89541 }
89542
89543 /* Kmalloc array is now usable */
89544@@ -492,6 +514,23 @@ void __init create_kmalloc_caches(unsigned long flags)
89545 }
89546 }
89547 #endif
89548+
89549+#ifdef CONFIG_PAX_USERCOPY_SLABS
89550+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
89551+ struct kmem_cache *s = kmalloc_caches[i];
89552+
89553+ if (s) {
89554+ int size = kmalloc_size(i);
89555+ char *n = kasprintf(GFP_NOWAIT,
89556+ "usercopy-kmalloc-%d", size);
89557+
89558+ BUG_ON(!n);
89559+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
89560+ size, SLAB_USERCOPY | flags);
89561+ }
89562+ }
89563+#endif
89564+
89565 }
89566 #endif /* !CONFIG_SLOB */
89567
89568@@ -516,6 +555,9 @@ void print_slabinfo_header(struct seq_file *m)
89569 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
89570 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
89571 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
89572+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89573+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
89574+#endif
89575 #endif
89576 seq_putc(m, '\n');
89577 }
89578diff --git a/mm/slob.c b/mm/slob.c
89579index eeed4a0..bb0e9ab 100644
89580--- a/mm/slob.c
89581+++ b/mm/slob.c
89582@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
89583 /*
89584 * Return the size of a slob block.
89585 */
89586-static slobidx_t slob_units(slob_t *s)
89587+static slobidx_t slob_units(const slob_t *s)
89588 {
89589 if (s->units > 0)
89590 return s->units;
89591@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
89592 /*
89593 * Return the next free slob block pointer after this one.
89594 */
89595-static slob_t *slob_next(slob_t *s)
89596+static slob_t *slob_next(const slob_t *s)
89597 {
89598 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
89599 slobidx_t next;
89600@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
89601 /*
89602 * Returns true if s is the last free block in its page.
89603 */
89604-static int slob_last(slob_t *s)
89605+static int slob_last(const slob_t *s)
89606 {
89607 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
89608 }
89609
89610-static void *slob_new_pages(gfp_t gfp, int order, int node)
89611+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
89612 {
89613- void *page;
89614+ struct page *page;
89615
89616 #ifdef CONFIG_NUMA
89617 if (node != NUMA_NO_NODE)
89618@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
89619 if (!page)
89620 return NULL;
89621
89622- return page_address(page);
89623+ __SetPageSlab(page);
89624+ return page;
89625 }
89626
89627-static void slob_free_pages(void *b, int order)
89628+static void slob_free_pages(struct page *sp, int order)
89629 {
89630 if (current->reclaim_state)
89631 current->reclaim_state->reclaimed_slab += 1 << order;
89632- free_pages((unsigned long)b, order);
89633+ __ClearPageSlab(sp);
89634+ page_mapcount_reset(sp);
89635+ sp->private = 0;
89636+ __free_pages(sp, order);
89637 }
89638
89639 /*
89640@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
89641
89642 /* Not enough space: must allocate a new page */
89643 if (!b) {
89644- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
89645- if (!b)
89646+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
89647+ if (!sp)
89648 return NULL;
89649- sp = virt_to_page(b);
89650- __SetPageSlab(sp);
89651+ b = page_address(sp);
89652
89653 spin_lock_irqsave(&slob_lock, flags);
89654 sp->units = SLOB_UNITS(PAGE_SIZE);
89655 sp->freelist = b;
89656+ sp->private = 0;
89657 INIT_LIST_HEAD(&sp->list);
89658 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
89659 set_slob_page_free(sp, slob_list);
89660@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
89661 if (slob_page_free(sp))
89662 clear_slob_page_free(sp);
89663 spin_unlock_irqrestore(&slob_lock, flags);
89664- __ClearPageSlab(sp);
89665- page_mapcount_reset(sp);
89666- slob_free_pages(b, 0);
89667+ slob_free_pages(sp, 0);
89668 return;
89669 }
89670
89671+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89672+ if (pax_sanitize_slab)
89673+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
89674+#endif
89675+
89676 if (!slob_page_free(sp)) {
89677 /* This slob page is about to become partially free. Easy! */
89678 sp->units = units;
89679@@ -424,11 +431,10 @@ out:
89680 */
89681
89682 static __always_inline void *
89683-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89684+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
89685 {
89686- unsigned int *m;
89687- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89688- void *ret;
89689+ slob_t *m;
89690+ void *ret = NULL;
89691
89692 gfp &= gfp_allowed_mask;
89693
89694@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89695
89696 if (!m)
89697 return NULL;
89698- *m = size;
89699+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
89700+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
89701+ m[0].units = size;
89702+ m[1].units = align;
89703 ret = (void *)m + align;
89704
89705 trace_kmalloc_node(caller, ret,
89706 size, size + align, gfp, node);
89707 } else {
89708 unsigned int order = get_order(size);
89709+ struct page *page;
89710
89711 if (likely(order))
89712 gfp |= __GFP_COMP;
89713- ret = slob_new_pages(gfp, order, node);
89714+ page = slob_new_pages(gfp, order, node);
89715+ if (page) {
89716+ ret = page_address(page);
89717+ page->private = size;
89718+ }
89719
89720 trace_kmalloc_node(caller, ret,
89721 size, PAGE_SIZE << order, gfp, node);
89722 }
89723
89724- kmemleak_alloc(ret, size, 1, gfp);
89725+ return ret;
89726+}
89727+
89728+static __always_inline void *
89729+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89730+{
89731+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89732+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
89733+
89734+ if (!ZERO_OR_NULL_PTR(ret))
89735+ kmemleak_alloc(ret, size, 1, gfp);
89736 return ret;
89737 }
89738
89739@@ -493,34 +517,112 @@ void kfree(const void *block)
89740 return;
89741 kmemleak_free(block);
89742
89743+ VM_BUG_ON(!virt_addr_valid(block));
89744 sp = virt_to_page(block);
89745- if (PageSlab(sp)) {
89746+ VM_BUG_ON(!PageSlab(sp));
89747+ if (!sp->private) {
89748 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89749- unsigned int *m = (unsigned int *)(block - align);
89750- slob_free(m, *m + align);
89751- } else
89752+ slob_t *m = (slob_t *)(block - align);
89753+ slob_free(m, m[0].units + align);
89754+ } else {
89755+ __ClearPageSlab(sp);
89756+ page_mapcount_reset(sp);
89757+ sp->private = 0;
89758 __free_pages(sp, compound_order(sp));
89759+ }
89760 }
89761 EXPORT_SYMBOL(kfree);
89762
89763+bool is_usercopy_object(const void *ptr)
89764+{
89765+ if (!slab_is_available())
89766+ return false;
89767+
89768+ // PAX: TODO
89769+
89770+ return false;
89771+}
89772+
89773+#ifdef CONFIG_PAX_USERCOPY
89774+const char *check_heap_object(const void *ptr, unsigned long n)
89775+{
89776+ struct page *page;
89777+ const slob_t *free;
89778+ const void *base;
89779+ unsigned long flags;
89780+
89781+ if (ZERO_OR_NULL_PTR(ptr))
89782+ return "<null>";
89783+
89784+ if (!virt_addr_valid(ptr))
89785+ return NULL;
89786+
89787+ page = virt_to_head_page(ptr);
89788+ if (!PageSlab(page))
89789+ return NULL;
89790+
89791+ if (page->private) {
89792+ base = page;
89793+ if (base <= ptr && n <= page->private - (ptr - base))
89794+ return NULL;
89795+ return "<slob>";
89796+ }
89797+
89798+ /* some tricky double walking to find the chunk */
89799+ spin_lock_irqsave(&slob_lock, flags);
89800+ base = (void *)((unsigned long)ptr & PAGE_MASK);
89801+ free = page->freelist;
89802+
89803+ while (!slob_last(free) && (void *)free <= ptr) {
89804+ base = free + slob_units(free);
89805+ free = slob_next(free);
89806+ }
89807+
89808+ while (base < (void *)free) {
89809+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
89810+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
89811+ int offset;
89812+
89813+ if (ptr < base + align)
89814+ break;
89815+
89816+ offset = ptr - base - align;
89817+ if (offset >= m) {
89818+ base += size;
89819+ continue;
89820+ }
89821+
89822+ if (n > m - offset)
89823+ break;
89824+
89825+ spin_unlock_irqrestore(&slob_lock, flags);
89826+ return NULL;
89827+ }
89828+
89829+ spin_unlock_irqrestore(&slob_lock, flags);
89830+ return "<slob>";
89831+}
89832+#endif
89833+
89834 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
89835 size_t ksize(const void *block)
89836 {
89837 struct page *sp;
89838 int align;
89839- unsigned int *m;
89840+ slob_t *m;
89841
89842 BUG_ON(!block);
89843 if (unlikely(block == ZERO_SIZE_PTR))
89844 return 0;
89845
89846 sp = virt_to_page(block);
89847- if (unlikely(!PageSlab(sp)))
89848- return PAGE_SIZE << compound_order(sp);
89849+ VM_BUG_ON(!PageSlab(sp));
89850+ if (sp->private)
89851+ return sp->private;
89852
89853 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89854- m = (unsigned int *)(block - align);
89855- return SLOB_UNITS(*m) * SLOB_UNIT;
89856+ m = (slob_t *)(block - align);
89857+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
89858 }
89859 EXPORT_SYMBOL(ksize);
89860
89861@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
89862
89863 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
89864 {
89865- void *b;
89866+ void *b = NULL;
89867
89868 flags &= gfp_allowed_mask;
89869
89870 lockdep_trace_alloc(flags);
89871
89872+#ifdef CONFIG_PAX_USERCOPY_SLABS
89873+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
89874+#else
89875 if (c->size < PAGE_SIZE) {
89876 b = slob_alloc(c->size, flags, c->align, node);
89877 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
89878 SLOB_UNITS(c->size) * SLOB_UNIT,
89879 flags, node);
89880 } else {
89881- b = slob_new_pages(flags, get_order(c->size), node);
89882+ struct page *sp;
89883+
89884+ sp = slob_new_pages(flags, get_order(c->size), node);
89885+ if (sp) {
89886+ b = page_address(sp);
89887+ sp->private = c->size;
89888+ }
89889 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
89890 PAGE_SIZE << get_order(c->size),
89891 flags, node);
89892 }
89893+#endif
89894
89895 if (c->ctor)
89896 c->ctor(b);
89897@@ -564,10 +676,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
89898
89899 static void __kmem_cache_free(void *b, int size)
89900 {
89901- if (size < PAGE_SIZE)
89902+ struct page *sp;
89903+
89904+ sp = virt_to_page(b);
89905+ BUG_ON(!PageSlab(sp));
89906+ if (!sp->private)
89907 slob_free(b, size);
89908 else
89909- slob_free_pages(b, get_order(size));
89910+ slob_free_pages(sp, get_order(size));
89911 }
89912
89913 static void kmem_rcu_free(struct rcu_head *head)
89914@@ -580,17 +696,31 @@ static void kmem_rcu_free(struct rcu_head *head)
89915
89916 void kmem_cache_free(struct kmem_cache *c, void *b)
89917 {
89918+ int size = c->size;
89919+
89920+#ifdef CONFIG_PAX_USERCOPY_SLABS
89921+ if (size + c->align < PAGE_SIZE) {
89922+ size += c->align;
89923+ b -= c->align;
89924+ }
89925+#endif
89926+
89927 kmemleak_free_recursive(b, c->flags);
89928 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
89929 struct slob_rcu *slob_rcu;
89930- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
89931- slob_rcu->size = c->size;
89932+ slob_rcu = b + (size - sizeof(struct slob_rcu));
89933+ slob_rcu->size = size;
89934 call_rcu(&slob_rcu->head, kmem_rcu_free);
89935 } else {
89936- __kmem_cache_free(b, c->size);
89937+ __kmem_cache_free(b, size);
89938 }
89939
89940+#ifdef CONFIG_PAX_USERCOPY_SLABS
89941+ trace_kfree(_RET_IP_, b);
89942+#else
89943 trace_kmem_cache_free(_RET_IP_, b);
89944+#endif
89945+
89946 }
89947 EXPORT_SYMBOL(kmem_cache_free);
89948
89949diff --git a/mm/slub.c b/mm/slub.c
89950index 57707f0..7857bd3 100644
89951--- a/mm/slub.c
89952+++ b/mm/slub.c
89953@@ -198,7 +198,7 @@ struct track {
89954
89955 enum track_item { TRACK_ALLOC, TRACK_FREE };
89956
89957-#ifdef CONFIG_SYSFS
89958+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
89959 static int sysfs_slab_add(struct kmem_cache *);
89960 static int sysfs_slab_alias(struct kmem_cache *, const char *);
89961 static void sysfs_slab_remove(struct kmem_cache *);
89962@@ -519,7 +519,7 @@ static void print_track(const char *s, struct track *t)
89963 if (!t->addr)
89964 return;
89965
89966- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
89967+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
89968 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
89969 #ifdef CONFIG_STACKTRACE
89970 {
89971@@ -2594,6 +2594,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
89972
89973 slab_free_hook(s, x);
89974
89975+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89976+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
89977+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
89978+ if (s->ctor)
89979+ s->ctor(x);
89980+ }
89981+#endif
89982+
89983 redo:
89984 /*
89985 * Determine the currently cpus per cpu slab.
89986@@ -2661,7 +2669,7 @@ static int slub_min_objects;
89987 * Merge control. If this is set then no merging of slab caches will occur.
89988 * (Could be removed. This was introduced to pacify the merge skeptics.)
89989 */
89990-static int slub_nomerge;
89991+static int slub_nomerge = 1;
89992
89993 /*
89994 * Calculate the order of allocation given an slab object size.
89995@@ -2938,6 +2946,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
89996 s->inuse = size;
89997
89998 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
89999+#ifdef CONFIG_PAX_MEMORY_SANITIZE
90000+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
90001+#endif
90002 s->ctor)) {
90003 /*
90004 * Relocate free pointer after the object if it is not
90005@@ -3283,6 +3294,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
90006 EXPORT_SYMBOL(__kmalloc_node);
90007 #endif
90008
90009+bool is_usercopy_object(const void *ptr)
90010+{
90011+ struct page *page;
90012+ struct kmem_cache *s;
90013+
90014+ if (ZERO_OR_NULL_PTR(ptr))
90015+ return false;
90016+
90017+ if (!slab_is_available())
90018+ return false;
90019+
90020+ if (!virt_addr_valid(ptr))
90021+ return false;
90022+
90023+ page = virt_to_head_page(ptr);
90024+
90025+ if (!PageSlab(page))
90026+ return false;
90027+
90028+ s = page->slab_cache;
90029+ return s->flags & SLAB_USERCOPY;
90030+}
90031+
90032+#ifdef CONFIG_PAX_USERCOPY
90033+const char *check_heap_object(const void *ptr, unsigned long n)
90034+{
90035+ struct page *page;
90036+ struct kmem_cache *s;
90037+ unsigned long offset;
90038+
90039+ if (ZERO_OR_NULL_PTR(ptr))
90040+ return "<null>";
90041+
90042+ if (!virt_addr_valid(ptr))
90043+ return NULL;
90044+
90045+ page = virt_to_head_page(ptr);
90046+
90047+ if (!PageSlab(page))
90048+ return NULL;
90049+
90050+ s = page->slab_cache;
90051+ if (!(s->flags & SLAB_USERCOPY))
90052+ return s->name;
90053+
90054+ offset = (ptr - page_address(page)) % s->size;
90055+ if (offset <= s->object_size && n <= s->object_size - offset)
90056+ return NULL;
90057+
90058+ return s->name;
90059+}
90060+#endif
90061+
90062 size_t ksize(const void *object)
90063 {
90064 struct page *page;
90065@@ -3347,6 +3411,7 @@ void kfree(const void *x)
90066 if (unlikely(ZERO_OR_NULL_PTR(x)))
90067 return;
90068
90069+ VM_BUG_ON(!virt_addr_valid(x));
90070 page = virt_to_head_page(x);
90071 if (unlikely(!PageSlab(page))) {
90072 BUG_ON(!PageCompound(page));
90073@@ -3652,7 +3717,7 @@ static int slab_unmergeable(struct kmem_cache *s)
90074 /*
90075 * We may have set a slab to be unmergeable during bootstrap.
90076 */
90077- if (s->refcount < 0)
90078+ if (atomic_read(&s->refcount) < 0)
90079 return 1;
90080
90081 return 0;
90082@@ -3710,7 +3775,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
90083
90084 s = find_mergeable(memcg, size, align, flags, name, ctor);
90085 if (s) {
90086- s->refcount++;
90087+ atomic_inc(&s->refcount);
90088 /*
90089 * Adjust the object sizes so that we clear
90090 * the complete object on kzalloc.
90091@@ -3719,7 +3784,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
90092 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
90093
90094 if (sysfs_slab_alias(s, name)) {
90095- s->refcount--;
90096+ atomic_dec(&s->refcount);
90097 s = NULL;
90098 }
90099 }
90100@@ -3781,7 +3846,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
90101 return NOTIFY_OK;
90102 }
90103
90104-static struct notifier_block __cpuinitdata slab_notifier = {
90105+static struct notifier_block slab_notifier = {
90106 .notifier_call = slab_cpuup_callback
90107 };
90108
90109@@ -3839,7 +3904,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
90110 }
90111 #endif
90112
90113-#ifdef CONFIG_SYSFS
90114+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90115 static int count_inuse(struct page *page)
90116 {
90117 return page->inuse;
90118@@ -4226,12 +4291,12 @@ static void resiliency_test(void)
90119 validate_slab_cache(kmalloc_caches[9]);
90120 }
90121 #else
90122-#ifdef CONFIG_SYSFS
90123+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90124 static void resiliency_test(void) {};
90125 #endif
90126 #endif
90127
90128-#ifdef CONFIG_SYSFS
90129+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90130 enum slab_stat_type {
90131 SL_ALL, /* All slabs */
90132 SL_PARTIAL, /* Only partially allocated slabs */
90133@@ -4475,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
90134
90135 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
90136 {
90137- return sprintf(buf, "%d\n", s->refcount - 1);
90138+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
90139 }
90140 SLAB_ATTR_RO(aliases);
90141
90142@@ -4563,6 +4628,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
90143 SLAB_ATTR_RO(cache_dma);
90144 #endif
90145
90146+#ifdef CONFIG_PAX_USERCOPY_SLABS
90147+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
90148+{
90149+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
90150+}
90151+SLAB_ATTR_RO(usercopy);
90152+#endif
90153+
90154 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
90155 {
90156 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
90157@@ -4897,6 +4970,9 @@ static struct attribute *slab_attrs[] = {
90158 #ifdef CONFIG_ZONE_DMA
90159 &cache_dma_attr.attr,
90160 #endif
90161+#ifdef CONFIG_PAX_USERCOPY_SLABS
90162+ &usercopy_attr.attr,
90163+#endif
90164 #ifdef CONFIG_NUMA
90165 &remote_node_defrag_ratio_attr.attr,
90166 #endif
90167@@ -5128,6 +5204,7 @@ static char *create_unique_id(struct kmem_cache *s)
90168 return name;
90169 }
90170
90171+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90172 static int sysfs_slab_add(struct kmem_cache *s)
90173 {
90174 int err;
90175@@ -5151,7 +5228,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
90176 }
90177
90178 s->kobj.kset = slab_kset;
90179- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
90180+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
90181 if (err) {
90182 kobject_put(&s->kobj);
90183 return err;
90184@@ -5185,6 +5262,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
90185 kobject_del(&s->kobj);
90186 kobject_put(&s->kobj);
90187 }
90188+#endif
90189
90190 /*
90191 * Need to buffer aliases during bootup until sysfs becomes
90192@@ -5198,6 +5276,7 @@ struct saved_alias {
90193
90194 static struct saved_alias *alias_list;
90195
90196+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90197 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
90198 {
90199 struct saved_alias *al;
90200@@ -5220,6 +5299,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
90201 alias_list = al;
90202 return 0;
90203 }
90204+#endif
90205
90206 static int __init slab_sysfs_init(void)
90207 {
90208diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
90209index 27eeab3..7c3f7f2 100644
90210--- a/mm/sparse-vmemmap.c
90211+++ b/mm/sparse-vmemmap.c
90212@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
90213 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
90214 if (!p)
90215 return NULL;
90216- pud_populate(&init_mm, pud, p);
90217+ pud_populate_kernel(&init_mm, pud, p);
90218 }
90219 return pud;
90220 }
90221@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
90222 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
90223 if (!p)
90224 return NULL;
90225- pgd_populate(&init_mm, pgd, p);
90226+ pgd_populate_kernel(&init_mm, pgd, p);
90227 }
90228 return pgd;
90229 }
90230diff --git a/mm/sparse.c b/mm/sparse.c
90231index 1c91f0d3..485470a 100644
90232--- a/mm/sparse.c
90233+++ b/mm/sparse.c
90234@@ -761,7 +761,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
90235
90236 for (i = 0; i < PAGES_PER_SECTION; i++) {
90237 if (PageHWPoison(&memmap[i])) {
90238- atomic_long_sub(1, &num_poisoned_pages);
90239+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
90240 ClearPageHWPoison(&memmap[i]);
90241 }
90242 }
90243diff --git a/mm/swap.c b/mm/swap.c
90244index dfd7d71..ccdf688 100644
90245--- a/mm/swap.c
90246+++ b/mm/swap.c
90247@@ -31,6 +31,7 @@
90248 #include <linux/memcontrol.h>
90249 #include <linux/gfp.h>
90250 #include <linux/uio.h>
90251+#include <linux/hugetlb.h>
90252
90253 #include "internal.h"
90254
90255@@ -73,6 +74,8 @@ static void __put_compound_page(struct page *page)
90256
90257 __page_cache_release(page);
90258 dtor = get_compound_page_dtor(page);
90259+ if (!PageHuge(page))
90260+ BUG_ON(dtor != free_compound_page);
90261 (*dtor)(page);
90262 }
90263
90264diff --git a/mm/swapfile.c b/mm/swapfile.c
90265index 746af55b..7ac94ae 100644
90266--- a/mm/swapfile.c
90267+++ b/mm/swapfile.c
90268@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
90269
90270 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
90271 /* Activity counter to indicate that a swapon or swapoff has occurred */
90272-static atomic_t proc_poll_event = ATOMIC_INIT(0);
90273+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
90274
90275 static inline unsigned char swap_count(unsigned char ent)
90276 {
90277@@ -1684,7 +1684,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
90278 }
90279 filp_close(swap_file, NULL);
90280 err = 0;
90281- atomic_inc(&proc_poll_event);
90282+ atomic_inc_unchecked(&proc_poll_event);
90283 wake_up_interruptible(&proc_poll_wait);
90284
90285 out_dput:
90286@@ -1701,8 +1701,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
90287
90288 poll_wait(file, &proc_poll_wait, wait);
90289
90290- if (seq->poll_event != atomic_read(&proc_poll_event)) {
90291- seq->poll_event = atomic_read(&proc_poll_event);
90292+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
90293+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
90294 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
90295 }
90296
90297@@ -1800,7 +1800,7 @@ static int swaps_open(struct inode *inode, struct file *file)
90298 return ret;
90299
90300 seq = file->private_data;
90301- seq->poll_event = atomic_read(&proc_poll_event);
90302+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
90303 return 0;
90304 }
90305
90306@@ -2143,7 +2143,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
90307 (frontswap_map) ? "FS" : "");
90308
90309 mutex_unlock(&swapon_mutex);
90310- atomic_inc(&proc_poll_event);
90311+ atomic_inc_unchecked(&proc_poll_event);
90312 wake_up_interruptible(&proc_poll_wait);
90313
90314 if (S_ISREG(inode->i_mode))
90315diff --git a/mm/util.c b/mm/util.c
90316index ab1424d..7c5bd5a 100644
90317--- a/mm/util.c
90318+++ b/mm/util.c
90319@@ -294,6 +294,12 @@ done:
90320 void arch_pick_mmap_layout(struct mm_struct *mm)
90321 {
90322 mm->mmap_base = TASK_UNMAPPED_BASE;
90323+
90324+#ifdef CONFIG_PAX_RANDMMAP
90325+ if (mm->pax_flags & MF_PAX_RANDMMAP)
90326+ mm->mmap_base += mm->delta_mmap;
90327+#endif
90328+
90329 mm->get_unmapped_area = arch_get_unmapped_area;
90330 mm->unmap_area = arch_unmap_area;
90331 }
90332diff --git a/mm/vmalloc.c b/mm/vmalloc.c
90333index d365724..6cae7c2 100644
90334--- a/mm/vmalloc.c
90335+++ b/mm/vmalloc.c
90336@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
90337
90338 pte = pte_offset_kernel(pmd, addr);
90339 do {
90340- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
90341- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
90342+
90343+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90344+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
90345+ BUG_ON(!pte_exec(*pte));
90346+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
90347+ continue;
90348+ }
90349+#endif
90350+
90351+ {
90352+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
90353+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
90354+ }
90355 } while (pte++, addr += PAGE_SIZE, addr != end);
90356 }
90357
90358@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
90359 pte = pte_alloc_kernel(pmd, addr);
90360 if (!pte)
90361 return -ENOMEM;
90362+
90363+ pax_open_kernel();
90364 do {
90365 struct page *page = pages[*nr];
90366
90367- if (WARN_ON(!pte_none(*pte)))
90368+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90369+ if (pgprot_val(prot) & _PAGE_NX)
90370+#endif
90371+
90372+ if (!pte_none(*pte)) {
90373+ pax_close_kernel();
90374+ WARN_ON(1);
90375 return -EBUSY;
90376- if (WARN_ON(!page))
90377+ }
90378+ if (!page) {
90379+ pax_close_kernel();
90380+ WARN_ON(1);
90381 return -ENOMEM;
90382+ }
90383 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
90384 (*nr)++;
90385 } while (pte++, addr += PAGE_SIZE, addr != end);
90386+ pax_close_kernel();
90387 return 0;
90388 }
90389
90390@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
90391 pmd_t *pmd;
90392 unsigned long next;
90393
90394- pmd = pmd_alloc(&init_mm, pud, addr);
90395+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
90396 if (!pmd)
90397 return -ENOMEM;
90398 do {
90399@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
90400 pud_t *pud;
90401 unsigned long next;
90402
90403- pud = pud_alloc(&init_mm, pgd, addr);
90404+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
90405 if (!pud)
90406 return -ENOMEM;
90407 do {
90408@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
90409 if (addr >= MODULES_VADDR && addr < MODULES_END)
90410 return 1;
90411 #endif
90412+
90413+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90414+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
90415+ return 1;
90416+#endif
90417+
90418 return is_vmalloc_addr(x);
90419 }
90420
90421@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
90422
90423 if (!pgd_none(*pgd)) {
90424 pud_t *pud = pud_offset(pgd, addr);
90425+#ifdef CONFIG_X86
90426+ if (!pud_large(*pud))
90427+#endif
90428 if (!pud_none(*pud)) {
90429 pmd_t *pmd = pmd_offset(pud, addr);
90430+#ifdef CONFIG_X86
90431+ if (!pmd_large(*pmd))
90432+#endif
90433 if (!pmd_none(*pmd)) {
90434 pte_t *ptep, pte;
90435
90436@@ -339,7 +375,7 @@ static void purge_vmap_area_lazy(void);
90437 * Allocate a region of KVA of the specified size and alignment, within the
90438 * vstart and vend.
90439 */
90440-static struct vmap_area *alloc_vmap_area(unsigned long size,
90441+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
90442 unsigned long align,
90443 unsigned long vstart, unsigned long vend,
90444 int node, gfp_t gfp_mask)
90445@@ -1337,6 +1373,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
90446 struct vm_struct *area;
90447
90448 BUG_ON(in_interrupt());
90449+
90450+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90451+ if (flags & VM_KERNEXEC) {
90452+ if (start != VMALLOC_START || end != VMALLOC_END)
90453+ return NULL;
90454+ start = (unsigned long)MODULES_EXEC_VADDR;
90455+ end = (unsigned long)MODULES_EXEC_END;
90456+ }
90457+#endif
90458+
90459 if (flags & VM_IOREMAP) {
90460 int bit = fls(size);
90461
90462@@ -1581,6 +1627,11 @@ void *vmap(struct page **pages, unsigned int count,
90463 if (count > totalram_pages)
90464 return NULL;
90465
90466+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90467+ if (!(pgprot_val(prot) & _PAGE_NX))
90468+ flags |= VM_KERNEXEC;
90469+#endif
90470+
90471 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
90472 __builtin_return_address(0));
90473 if (!area)
90474@@ -1682,6 +1733,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
90475 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
90476 goto fail;
90477
90478+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90479+ if (!(pgprot_val(prot) & _PAGE_NX))
90480+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
90481+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
90482+ else
90483+#endif
90484+
90485 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
90486 start, end, node, gfp_mask, caller);
90487 if (!area)
90488@@ -1858,10 +1916,9 @@ EXPORT_SYMBOL(vzalloc_node);
90489 * For tight control over page level allocator and protection flags
90490 * use __vmalloc() instead.
90491 */
90492-
90493 void *vmalloc_exec(unsigned long size)
90494 {
90495- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
90496+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
90497 NUMA_NO_NODE, __builtin_return_address(0));
90498 }
90499
90500@@ -2168,6 +2225,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
90501 unsigned long uaddr = vma->vm_start;
90502 unsigned long usize = vma->vm_end - vma->vm_start;
90503
90504+ BUG_ON(vma->vm_mirror);
90505+
90506 if ((PAGE_SIZE-1) & (unsigned long)addr)
90507 return -EINVAL;
90508
90509@@ -2629,7 +2688,11 @@ static int s_show(struct seq_file *m, void *p)
90510 v->addr, v->addr + v->size, v->size);
90511
90512 if (v->caller)
90513+#ifdef CONFIG_GRKERNSEC_HIDESYM
90514+ seq_printf(m, " %pK", v->caller);
90515+#else
90516 seq_printf(m, " %pS", v->caller);
90517+#endif
90518
90519 if (v->nr_pages)
90520 seq_printf(m, " pages=%d", v->nr_pages);
90521diff --git a/mm/vmstat.c b/mm/vmstat.c
90522index f42745e..62f8346 100644
90523--- a/mm/vmstat.c
90524+++ b/mm/vmstat.c
90525@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
90526 *
90527 * vm_stat contains the global counters
90528 */
90529-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90530+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90531 EXPORT_SYMBOL(vm_stat);
90532
90533 #ifdef CONFIG_SMP
90534@@ -452,7 +452,7 @@ void refresh_cpu_vm_stats(int cpu)
90535 v = p->vm_stat_diff[i];
90536 p->vm_stat_diff[i] = 0;
90537 local_irq_restore(flags);
90538- atomic_long_add(v, &zone->vm_stat[i]);
90539+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
90540 global_diff[i] += v;
90541 #ifdef CONFIG_NUMA
90542 /* 3 seconds idle till flush */
90543@@ -490,7 +490,7 @@ void refresh_cpu_vm_stats(int cpu)
90544
90545 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
90546 if (global_diff[i])
90547- atomic_long_add(global_diff[i], &vm_stat[i]);
90548+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
90549 }
90550
90551 /*
90552@@ -505,8 +505,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
90553 if (pset->vm_stat_diff[i]) {
90554 int v = pset->vm_stat_diff[i];
90555 pset->vm_stat_diff[i] = 0;
90556- atomic_long_add(v, &zone->vm_stat[i]);
90557- atomic_long_add(v, &vm_stat[i]);
90558+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
90559+ atomic_long_add_unchecked(v, &vm_stat[i]);
90560 }
90561 }
90562 #endif
90563@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
90564 return NOTIFY_OK;
90565 }
90566
90567-static struct notifier_block __cpuinitdata vmstat_notifier =
90568+static struct notifier_block vmstat_notifier =
90569 { &vmstat_cpuup_callback, NULL, 0 };
90570 #endif
90571
90572@@ -1241,10 +1241,20 @@ static int __init setup_vmstat(void)
90573 start_cpu_timer(cpu);
90574 #endif
90575 #ifdef CONFIG_PROC_FS
90576- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
90577- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
90578- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
90579- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
90580+ {
90581+ mode_t gr_mode = S_IRUGO;
90582+#ifdef CONFIG_GRKERNSEC_PROC_ADD
90583+ gr_mode = S_IRUSR;
90584+#endif
90585+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
90586+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
90587+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
90588+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
90589+#else
90590+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
90591+#endif
90592+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
90593+ }
90594 #endif
90595 return 0;
90596 }
90597diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
90598index 9424f37..6aabf19 100644
90599--- a/net/8021q/vlan.c
90600+++ b/net/8021q/vlan.c
90601@@ -469,7 +469,7 @@ out:
90602 return NOTIFY_DONE;
90603 }
90604
90605-static struct notifier_block vlan_notifier_block __read_mostly = {
90606+static struct notifier_block vlan_notifier_block = {
90607 .notifier_call = vlan_device_event,
90608 };
90609
90610@@ -544,8 +544,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
90611 err = -EPERM;
90612 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
90613 break;
90614- if ((args.u.name_type >= 0) &&
90615- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
90616+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
90617 struct vlan_net *vn;
90618
90619 vn = net_generic(net, vlan_net_id);
90620diff --git a/net/9p/mod.c b/net/9p/mod.c
90621index 6ab36ae..6f1841b 100644
90622--- a/net/9p/mod.c
90623+++ b/net/9p/mod.c
90624@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
90625 void v9fs_register_trans(struct p9_trans_module *m)
90626 {
90627 spin_lock(&v9fs_trans_lock);
90628- list_add_tail(&m->list, &v9fs_trans_list);
90629+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
90630 spin_unlock(&v9fs_trans_lock);
90631 }
90632 EXPORT_SYMBOL(v9fs_register_trans);
90633@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
90634 void v9fs_unregister_trans(struct p9_trans_module *m)
90635 {
90636 spin_lock(&v9fs_trans_lock);
90637- list_del_init(&m->list);
90638+ pax_list_del_init((struct list_head *)&m->list);
90639 spin_unlock(&v9fs_trans_lock);
90640 }
90641 EXPORT_SYMBOL(v9fs_unregister_trans);
90642diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
90643index 02efb25..41541a9 100644
90644--- a/net/9p/trans_fd.c
90645+++ b/net/9p/trans_fd.c
90646@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
90647 oldfs = get_fs();
90648 set_fs(get_ds());
90649 /* The cast to a user pointer is valid due to the set_fs() */
90650- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
90651+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
90652 set_fs(oldfs);
90653
90654 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
90655diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
90656index 876fbe8..8bbea9f 100644
90657--- a/net/atm/atm_misc.c
90658+++ b/net/atm/atm_misc.c
90659@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
90660 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
90661 return 1;
90662 atm_return(vcc, truesize);
90663- atomic_inc(&vcc->stats->rx_drop);
90664+ atomic_inc_unchecked(&vcc->stats->rx_drop);
90665 return 0;
90666 }
90667 EXPORT_SYMBOL(atm_charge);
90668@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
90669 }
90670 }
90671 atm_return(vcc, guess);
90672- atomic_inc(&vcc->stats->rx_drop);
90673+ atomic_inc_unchecked(&vcc->stats->rx_drop);
90674 return NULL;
90675 }
90676 EXPORT_SYMBOL(atm_alloc_charge);
90677@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
90678
90679 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
90680 {
90681-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
90682+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
90683 __SONET_ITEMS
90684 #undef __HANDLE_ITEM
90685 }
90686@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
90687
90688 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
90689 {
90690-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
90691+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
90692 __SONET_ITEMS
90693 #undef __HANDLE_ITEM
90694 }
90695diff --git a/net/atm/lec.h b/net/atm/lec.h
90696index 4149db1..f2ab682 100644
90697--- a/net/atm/lec.h
90698+++ b/net/atm/lec.h
90699@@ -48,7 +48,7 @@ struct lane2_ops {
90700 const u8 *tlvs, u32 sizeoftlvs);
90701 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
90702 const u8 *tlvs, u32 sizeoftlvs);
90703-};
90704+} __no_const;
90705
90706 /*
90707 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
90708diff --git a/net/atm/proc.c b/net/atm/proc.c
90709index bbb6461..cf04016 100644
90710--- a/net/atm/proc.c
90711+++ b/net/atm/proc.c
90712@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
90713 const struct k_atm_aal_stats *stats)
90714 {
90715 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
90716- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
90717- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
90718- atomic_read(&stats->rx_drop));
90719+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
90720+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
90721+ atomic_read_unchecked(&stats->rx_drop));
90722 }
90723
90724 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
90725diff --git a/net/atm/resources.c b/net/atm/resources.c
90726index 0447d5d..3cf4728 100644
90727--- a/net/atm/resources.c
90728+++ b/net/atm/resources.c
90729@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
90730 static void copy_aal_stats(struct k_atm_aal_stats *from,
90731 struct atm_aal_stats *to)
90732 {
90733-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
90734+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
90735 __AAL_STAT_ITEMS
90736 #undef __HANDLE_ITEM
90737 }
90738@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
90739 static void subtract_aal_stats(struct k_atm_aal_stats *from,
90740 struct atm_aal_stats *to)
90741 {
90742-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
90743+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
90744 __AAL_STAT_ITEMS
90745 #undef __HANDLE_ITEM
90746 }
90747diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
90748index d5744b7..506bae3 100644
90749--- a/net/ax25/sysctl_net_ax25.c
90750+++ b/net/ax25/sysctl_net_ax25.c
90751@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
90752 {
90753 char path[sizeof("net/ax25/") + IFNAMSIZ];
90754 int k;
90755- struct ctl_table *table;
90756+ ctl_table_no_const *table;
90757
90758 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
90759 if (!table)
90760diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
90761index f680ee1..97e3542 100644
90762--- a/net/batman-adv/bat_iv_ogm.c
90763+++ b/net/batman-adv/bat_iv_ogm.c
90764@@ -79,7 +79,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
90765
90766 /* randomize initial seqno to avoid collision */
90767 get_random_bytes(&random_seqno, sizeof(random_seqno));
90768- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
90769+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
90770
90771 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
90772 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
90773@@ -627,9 +627,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
90774 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
90775
90776 /* change sequence number to network order */
90777- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
90778+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
90779 batadv_ogm_packet->seqno = htonl(seqno);
90780- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
90781+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
90782
90783 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
90784 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
90785@@ -1037,7 +1037,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
90786 return;
90787
90788 /* could be changed by schedule_own_packet() */
90789- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
90790+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
90791
90792 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
90793 has_directlink_flag = 1;
90794diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
90795index de27b31..7058bfe 100644
90796--- a/net/batman-adv/bridge_loop_avoidance.c
90797+++ b/net/batman-adv/bridge_loop_avoidance.c
90798@@ -1522,6 +1522,8 @@ out:
90799 * in these cases, the skb is further handled by this function and
90800 * returns 1, otherwise it returns 0 and the caller shall further
90801 * process the skb.
90802+ *
90803+ * This call might reallocate skb data.
90804 */
90805 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
90806 {
90807diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
90808index f105219..7614af3 100644
90809--- a/net/batman-adv/gateway_client.c
90810+++ b/net/batman-adv/gateway_client.c
90811@@ -508,6 +508,7 @@ out:
90812 return 0;
90813 }
90814
90815+/* this call might reallocate skb data */
90816 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
90817 {
90818 int ret = false;
90819@@ -568,6 +569,7 @@ out:
90820 return ret;
90821 }
90822
90823+/* this call might reallocate skb data */
90824 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90825 {
90826 struct ethhdr *ethhdr;
90827@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90828
90829 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
90830 return false;
90831+
90832+ /* skb->data might have been reallocated by pskb_may_pull() */
90833+ ethhdr = (struct ethhdr *)skb->data;
90834+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
90835+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
90836+
90837 udphdr = (struct udphdr *)(skb->data + *header_len);
90838 *header_len += sizeof(*udphdr);
90839
90840@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90841 return true;
90842 }
90843
90844+/* this call might reallocate skb data */
90845 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
90846- struct sk_buff *skb, struct ethhdr *ethhdr)
90847+ struct sk_buff *skb)
90848 {
90849 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
90850 struct batadv_orig_node *orig_dst_node = NULL;
90851 struct batadv_gw_node *curr_gw = NULL;
90852+ struct ethhdr *ethhdr;
90853 bool ret, out_of_range = false;
90854 unsigned int header_len = 0;
90855 uint8_t curr_tq_avg;
90856@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
90857 if (!ret)
90858 goto out;
90859
90860+ ethhdr = (struct ethhdr *)skb->data;
90861 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
90862 ethhdr->h_dest);
90863 if (!orig_dst_node)
90864diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
90865index 039902d..1037d75 100644
90866--- a/net/batman-adv/gateway_client.h
90867+++ b/net/batman-adv/gateway_client.h
90868@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
90869 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
90870 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
90871 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
90872-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
90873- struct sk_buff *skb, struct ethhdr *ethhdr);
90874+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
90875
90876 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
90877diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
90878index 522243a..b48c0ef 100644
90879--- a/net/batman-adv/hard-interface.c
90880+++ b/net/batman-adv/hard-interface.c
90881@@ -401,7 +401,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
90882 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
90883 dev_add_pack(&hard_iface->batman_adv_ptype);
90884
90885- atomic_set(&hard_iface->frag_seqno, 1);
90886+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
90887 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
90888 hard_iface->net_dev->name);
90889
90890@@ -550,7 +550,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
90891 /* This can't be called via a bat_priv callback because
90892 * we have no bat_priv yet.
90893 */
90894- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
90895+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
90896 hard_iface->bat_iv.ogm_buff = NULL;
90897
90898 return hard_iface;
90899diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
90900index 819dfb0..226bacd 100644
90901--- a/net/batman-adv/soft-interface.c
90902+++ b/net/batman-adv/soft-interface.c
90903@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
90904 if (batadv_bla_tx(bat_priv, skb, vid))
90905 goto dropped;
90906
90907+ /* skb->data might have been reallocated by batadv_bla_tx() */
90908+ ethhdr = (struct ethhdr *)skb->data;
90909+
90910 /* Register the client MAC in the transtable */
90911 if (!is_multicast_ether_addr(ethhdr->h_source))
90912 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
90913@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
90914 default:
90915 break;
90916 }
90917+
90918+ /* reminder: ethhdr might have become unusable from here on
90919+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
90920+ */
90921 }
90922
90923 /* ethernet packet should be broadcasted */
90924@@ -253,7 +260,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
90925 primary_if->net_dev->dev_addr, ETH_ALEN);
90926
90927 /* set broadcast sequence number */
90928- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
90929+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
90930 bcast_packet->seqno = htonl(seqno);
90931
90932 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
90933@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
90934 /* unicast packet */
90935 } else {
90936 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
90937- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
90938+ ret = batadv_gw_out_of_range(bat_priv, skb);
90939 if (ret)
90940 goto dropped;
90941 }
90942@@ -472,7 +479,7 @@ static int batadv_softif_init_late(struct net_device *dev)
90943 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
90944
90945 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
90946- atomic_set(&bat_priv->bcast_seqno, 1);
90947+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
90948 atomic_set(&bat_priv->tt.vn, 0);
90949 atomic_set(&bat_priv->tt.local_changes, 0);
90950 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
90951diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
90952index aba8364..50fcbb8 100644
90953--- a/net/batman-adv/types.h
90954+++ b/net/batman-adv/types.h
90955@@ -51,7 +51,7 @@
90956 struct batadv_hard_iface_bat_iv {
90957 unsigned char *ogm_buff;
90958 int ogm_buff_len;
90959- atomic_t ogm_seqno;
90960+ atomic_unchecked_t ogm_seqno;
90961 };
90962
90963 /**
90964@@ -75,7 +75,7 @@ struct batadv_hard_iface {
90965 int16_t if_num;
90966 char if_status;
90967 struct net_device *net_dev;
90968- atomic_t frag_seqno;
90969+ atomic_unchecked_t frag_seqno;
90970 struct kobject *hardif_obj;
90971 atomic_t refcount;
90972 struct packet_type batman_adv_ptype;
90973@@ -558,7 +558,7 @@ struct batadv_priv {
90974 #ifdef CONFIG_BATMAN_ADV_DEBUG
90975 atomic_t log_level;
90976 #endif
90977- atomic_t bcast_seqno;
90978+ atomic_unchecked_t bcast_seqno;
90979 atomic_t bcast_queue_left;
90980 atomic_t batman_queue_left;
90981 char num_ifaces;
90982diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
90983index 0bb3b59..0e3052e 100644
90984--- a/net/batman-adv/unicast.c
90985+++ b/net/batman-adv/unicast.c
90986@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
90987 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
90988 frag2->flags = large_tail;
90989
90990- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
90991+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
90992 frag1->seqno = htons(seqno - 1);
90993 frag2->seqno = htons(seqno);
90994
90995@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
90996 * @skb: the skb containing the payload to encapsulate
90997 * @orig_node: the destination node
90998 *
90999- * Returns false if the payload could not be encapsulated or true otherwise
91000+ * Returns false if the payload could not be encapsulated or true otherwise.
91001+ *
91002+ * This call might reallocate skb data.
91003 */
91004 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
91005 struct batadv_orig_node *orig_node)
91006@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
91007 * @orig_node: the destination node
91008 * @packet_subtype: the batman 4addr packet subtype to use
91009 *
91010- * Returns false if the payload could not be encapsulated or true otherwise
91011+ * Returns false if the payload could not be encapsulated or true otherwise.
91012+ *
91013+ * This call might reallocate skb data.
91014 */
91015 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
91016 struct sk_buff *skb,
91017@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
91018 struct batadv_neigh_node *neigh_node;
91019 int data_len = skb->len;
91020 int ret = NET_RX_DROP;
91021- unsigned int dev_mtu;
91022+ unsigned int dev_mtu, header_len;
91023
91024 /* get routing information */
91025 if (is_multicast_ether_addr(ethhdr->h_dest)) {
91026@@ -429,10 +433,12 @@ find_router:
91027 switch (packet_type) {
91028 case BATADV_UNICAST:
91029 batadv_unicast_prepare_skb(skb, orig_node);
91030+ header_len = sizeof(struct batadv_unicast_packet);
91031 break;
91032 case BATADV_UNICAST_4ADDR:
91033 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
91034 packet_subtype);
91035+ header_len = sizeof(struct batadv_unicast_4addr_packet);
91036 break;
91037 default:
91038 /* this function supports UNICAST and UNICAST_4ADDR only. It
91039@@ -441,6 +447,7 @@ find_router:
91040 goto out;
91041 }
91042
91043+ ethhdr = (struct ethhdr *)(skb->data + header_len);
91044 unicast_packet = (struct batadv_unicast_packet *)skb->data;
91045
91046 /* inform the destination node that we are still missing a correct route
91047diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
91048index ace5e55..a65a1c0 100644
91049--- a/net/bluetooth/hci_core.c
91050+++ b/net/bluetooth/hci_core.c
91051@@ -2211,16 +2211,16 @@ int hci_register_dev(struct hci_dev *hdev)
91052 list_add(&hdev->list, &hci_dev_list);
91053 write_unlock(&hci_dev_list_lock);
91054
91055- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
91056- WQ_MEM_RECLAIM, 1);
91057+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
91058+ WQ_MEM_RECLAIM, 1, hdev->name);
91059 if (!hdev->workqueue) {
91060 error = -ENOMEM;
91061 goto err;
91062 }
91063
91064- hdev->req_workqueue = alloc_workqueue(hdev->name,
91065+ hdev->req_workqueue = alloc_workqueue("%s",
91066 WQ_HIGHPRI | WQ_UNBOUND |
91067- WQ_MEM_RECLAIM, 1);
91068+ WQ_MEM_RECLAIM, 1, hdev->name);
91069 if (!hdev->req_workqueue) {
91070 destroy_workqueue(hdev->workqueue);
91071 error = -ENOMEM;
91072diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
91073index 9bd7d95..6c4884f 100644
91074--- a/net/bluetooth/hci_sock.c
91075+++ b/net/bluetooth/hci_sock.c
91076@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
91077 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
91078 }
91079
91080- len = min_t(unsigned int, len, sizeof(uf));
91081+ len = min((size_t)len, sizeof(uf));
91082 if (copy_from_user(&uf, optval, len)) {
91083 err = -EFAULT;
91084 break;
91085diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
91086index 68843a2..30e9342 100644
91087--- a/net/bluetooth/l2cap_core.c
91088+++ b/net/bluetooth/l2cap_core.c
91089@@ -3507,8 +3507,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
91090 break;
91091
91092 case L2CAP_CONF_RFC:
91093- if (olen == sizeof(rfc))
91094- memcpy(&rfc, (void *)val, olen);
91095+ if (olen != sizeof(rfc))
91096+ break;
91097+
91098+ memcpy(&rfc, (void *)val, olen);
91099
91100 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
91101 rfc.mode != chan->mode)
91102diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
91103index 36fed40..be2eeb2 100644
91104--- a/net/bluetooth/l2cap_sock.c
91105+++ b/net/bluetooth/l2cap_sock.c
91106@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
91107 struct sock *sk = sock->sk;
91108 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
91109 struct l2cap_options opts;
91110- int len, err = 0;
91111+ int err = 0;
91112+ size_t len = optlen;
91113 u32 opt;
91114
91115 BT_DBG("sk %p", sk);
91116@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
91117 opts.max_tx = chan->max_tx;
91118 opts.txwin_size = chan->tx_win;
91119
91120- len = min_t(unsigned int, sizeof(opts), optlen);
91121+ len = min(sizeof(opts), len);
91122 if (copy_from_user((char *) &opts, optval, len)) {
91123 err = -EFAULT;
91124 break;
91125@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91126 struct bt_security sec;
91127 struct bt_power pwr;
91128 struct l2cap_conn *conn;
91129- int len, err = 0;
91130+ int err = 0;
91131+ size_t len = optlen;
91132 u32 opt;
91133
91134 BT_DBG("sk %p", sk);
91135@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91136
91137 sec.level = BT_SECURITY_LOW;
91138
91139- len = min_t(unsigned int, sizeof(sec), optlen);
91140+ len = min(sizeof(sec), len);
91141 if (copy_from_user((char *) &sec, optval, len)) {
91142 err = -EFAULT;
91143 break;
91144@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91145
91146 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
91147
91148- len = min_t(unsigned int, sizeof(pwr), optlen);
91149+ len = min(sizeof(pwr), len);
91150 if (copy_from_user((char *) &pwr, optval, len)) {
91151 err = -EFAULT;
91152 break;
91153diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
91154index 30b3721..c1bd0a0 100644
91155--- a/net/bluetooth/rfcomm/sock.c
91156+++ b/net/bluetooth/rfcomm/sock.c
91157@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
91158 struct sock *sk = sock->sk;
91159 struct bt_security sec;
91160 int err = 0;
91161- size_t len;
91162+ size_t len = optlen;
91163 u32 opt;
91164
91165 BT_DBG("sk %p", sk);
91166@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
91167
91168 sec.level = BT_SECURITY_LOW;
91169
91170- len = min_t(unsigned int, sizeof(sec), optlen);
91171+ len = min(sizeof(sec), len);
91172 if (copy_from_user((char *) &sec, optval, len)) {
91173 err = -EFAULT;
91174 break;
91175diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
91176index b6e44ad..5b0d514 100644
91177--- a/net/bluetooth/rfcomm/tty.c
91178+++ b/net/bluetooth/rfcomm/tty.c
91179@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
91180 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
91181
91182 spin_lock_irqsave(&dev->port.lock, flags);
91183- if (dev->port.count > 0) {
91184+ if (atomic_read(&dev->port.count) > 0) {
91185 spin_unlock_irqrestore(&dev->port.lock, flags);
91186 return;
91187 }
91188@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
91189 return -ENODEV;
91190
91191 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
91192- dev->channel, dev->port.count);
91193+ dev->channel, atomic_read(&dev->port.count));
91194
91195 spin_lock_irqsave(&dev->port.lock, flags);
91196- if (++dev->port.count > 1) {
91197+ if (atomic_inc_return(&dev->port.count) > 1) {
91198 spin_unlock_irqrestore(&dev->port.lock, flags);
91199 return 0;
91200 }
91201@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
91202 return;
91203
91204 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
91205- dev->port.count);
91206+ atomic_read(&dev->port.count));
91207
91208 spin_lock_irqsave(&dev->port.lock, flags);
91209- if (!--dev->port.count) {
91210+ if (!atomic_dec_return(&dev->port.count)) {
91211 spin_unlock_irqrestore(&dev->port.lock, flags);
91212 if (dev->tty_dev->parent)
91213 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
91214diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
91215index 3d110c4..4e1b2eb 100644
91216--- a/net/bridge/netfilter/ebtables.c
91217+++ b/net/bridge/netfilter/ebtables.c
91218@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
91219 tmp.valid_hooks = t->table->valid_hooks;
91220 }
91221 mutex_unlock(&ebt_mutex);
91222- if (copy_to_user(user, &tmp, *len) != 0){
91223+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
91224 BUGPRINT("c2u Didn't work\n");
91225 ret = -EFAULT;
91226 break;
91227@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
91228 goto out;
91229 tmp.valid_hooks = t->valid_hooks;
91230
91231- if (copy_to_user(user, &tmp, *len) != 0) {
91232+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
91233 ret = -EFAULT;
91234 break;
91235 }
91236@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
91237 tmp.entries_size = t->table->entries_size;
91238 tmp.valid_hooks = t->table->valid_hooks;
91239
91240- if (copy_to_user(user, &tmp, *len) != 0) {
91241+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
91242 ret = -EFAULT;
91243 break;
91244 }
91245diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
91246index 2bd4b58..0dc30a1 100644
91247--- a/net/caif/cfctrl.c
91248+++ b/net/caif/cfctrl.c
91249@@ -10,6 +10,7 @@
91250 #include <linux/spinlock.h>
91251 #include <linux/slab.h>
91252 #include <linux/pkt_sched.h>
91253+#include <linux/sched.h>
91254 #include <net/caif/caif_layer.h>
91255 #include <net/caif/cfpkt.h>
91256 #include <net/caif/cfctrl.h>
91257@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
91258 memset(&dev_info, 0, sizeof(dev_info));
91259 dev_info.id = 0xff;
91260 cfsrvl_init(&this->serv, 0, &dev_info, false);
91261- atomic_set(&this->req_seq_no, 1);
91262- atomic_set(&this->rsp_seq_no, 1);
91263+ atomic_set_unchecked(&this->req_seq_no, 1);
91264+ atomic_set_unchecked(&this->rsp_seq_no, 1);
91265 this->serv.layer.receive = cfctrl_recv;
91266 sprintf(this->serv.layer.name, "ctrl");
91267 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
91268@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
91269 struct cfctrl_request_info *req)
91270 {
91271 spin_lock_bh(&ctrl->info_list_lock);
91272- atomic_inc(&ctrl->req_seq_no);
91273- req->sequence_no = atomic_read(&ctrl->req_seq_no);
91274+ atomic_inc_unchecked(&ctrl->req_seq_no);
91275+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
91276 list_add_tail(&req->list, &ctrl->list);
91277 spin_unlock_bh(&ctrl->info_list_lock);
91278 }
91279@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
91280 if (p != first)
91281 pr_warn("Requests are not received in order\n");
91282
91283- atomic_set(&ctrl->rsp_seq_no,
91284+ atomic_set_unchecked(&ctrl->rsp_seq_no,
91285 p->sequence_no);
91286 list_del(&p->list);
91287 goto out;
91288diff --git a/net/can/af_can.c b/net/can/af_can.c
91289index c4e5085..aa9efdf 100644
91290--- a/net/can/af_can.c
91291+++ b/net/can/af_can.c
91292@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
91293 };
91294
91295 /* notifier block for netdevice event */
91296-static struct notifier_block can_netdev_notifier __read_mostly = {
91297+static struct notifier_block can_netdev_notifier = {
91298 .notifier_call = can_notifier,
91299 };
91300
91301diff --git a/net/can/gw.c b/net/can/gw.c
91302index 3ee690e..00d581b 100644
91303--- a/net/can/gw.c
91304+++ b/net/can/gw.c
91305@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
91306 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
91307
91308 static HLIST_HEAD(cgw_list);
91309-static struct notifier_block notifier;
91310
91311 static struct kmem_cache *cgw_cache __read_mostly;
91312
91313@@ -927,6 +926,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
91314 return err;
91315 }
91316
91317+static struct notifier_block notifier = {
91318+ .notifier_call = cgw_notifier
91319+};
91320+
91321 static __init int cgw_module_init(void)
91322 {
91323 /* sanitize given module parameter */
91324@@ -942,7 +945,6 @@ static __init int cgw_module_init(void)
91325 return -ENOMEM;
91326
91327 /* set notifier */
91328- notifier.notifier_call = cgw_notifier;
91329 register_netdevice_notifier(&notifier);
91330
91331 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
91332diff --git a/net/compat.c b/net/compat.c
91333index f0a1ba6..0541331 100644
91334--- a/net/compat.c
91335+++ b/net/compat.c
91336@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
91337 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
91338 __get_user(kmsg->msg_flags, &umsg->msg_flags))
91339 return -EFAULT;
91340- kmsg->msg_name = compat_ptr(tmp1);
91341- kmsg->msg_iov = compat_ptr(tmp2);
91342- kmsg->msg_control = compat_ptr(tmp3);
91343+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
91344+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
91345+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
91346 return 0;
91347 }
91348
91349@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91350
91351 if (kern_msg->msg_namelen) {
91352 if (mode == VERIFY_READ) {
91353- int err = move_addr_to_kernel(kern_msg->msg_name,
91354+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
91355 kern_msg->msg_namelen,
91356 kern_address);
91357 if (err < 0)
91358@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91359 kern_msg->msg_name = NULL;
91360
91361 tot_len = iov_from_user_compat_to_kern(kern_iov,
91362- (struct compat_iovec __user *)kern_msg->msg_iov,
91363+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
91364 kern_msg->msg_iovlen);
91365 if (tot_len >= 0)
91366 kern_msg->msg_iov = kern_iov;
91367@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91368
91369 #define CMSG_COMPAT_FIRSTHDR(msg) \
91370 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
91371- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
91372+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
91373 (struct compat_cmsghdr __user *)NULL)
91374
91375 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
91376 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
91377 (ucmlen) <= (unsigned long) \
91378 ((mhdr)->msg_controllen - \
91379- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
91380+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
91381
91382 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
91383 struct compat_cmsghdr __user *cmsg, int cmsg_len)
91384 {
91385 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
91386- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
91387+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
91388 msg->msg_controllen)
91389 return NULL;
91390 return (struct compat_cmsghdr __user *)ptr;
91391@@ -219,7 +219,7 @@ Efault:
91392
91393 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
91394 {
91395- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
91396+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
91397 struct compat_cmsghdr cmhdr;
91398 struct compat_timeval ctv;
91399 struct compat_timespec cts[3];
91400@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
91401
91402 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
91403 {
91404- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
91405+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
91406 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
91407 int fdnum = scm->fp->count;
91408 struct file **fp = scm->fp->fp;
91409@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
91410 return -EFAULT;
91411 old_fs = get_fs();
91412 set_fs(KERNEL_DS);
91413- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
91414+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
91415 set_fs(old_fs);
91416
91417 return err;
91418@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
91419 len = sizeof(ktime);
91420 old_fs = get_fs();
91421 set_fs(KERNEL_DS);
91422- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
91423+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
91424 set_fs(old_fs);
91425
91426 if (!err) {
91427@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91428 case MCAST_JOIN_GROUP:
91429 case MCAST_LEAVE_GROUP:
91430 {
91431- struct compat_group_req __user *gr32 = (void *)optval;
91432+ struct compat_group_req __user *gr32 = (void __user *)optval;
91433 struct group_req __user *kgr =
91434 compat_alloc_user_space(sizeof(struct group_req));
91435 u32 interface;
91436@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91437 case MCAST_BLOCK_SOURCE:
91438 case MCAST_UNBLOCK_SOURCE:
91439 {
91440- struct compat_group_source_req __user *gsr32 = (void *)optval;
91441+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
91442 struct group_source_req __user *kgsr = compat_alloc_user_space(
91443 sizeof(struct group_source_req));
91444 u32 interface;
91445@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91446 }
91447 case MCAST_MSFILTER:
91448 {
91449- struct compat_group_filter __user *gf32 = (void *)optval;
91450+ struct compat_group_filter __user *gf32 = (void __user *)optval;
91451 struct group_filter __user *kgf;
91452 u32 interface, fmode, numsrc;
91453
91454@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
91455 char __user *optval, int __user *optlen,
91456 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
91457 {
91458- struct compat_group_filter __user *gf32 = (void *)optval;
91459+ struct compat_group_filter __user *gf32 = (void __user *)optval;
91460 struct group_filter __user *kgf;
91461 int __user *koptlen;
91462 u32 interface, fmode, numsrc;
91463@@ -805,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
91464
91465 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
91466 return -EINVAL;
91467- if (copy_from_user(a, args, nas[call]))
91468+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
91469 return -EFAULT;
91470 a0 = a[0];
91471 a1 = a[1];
91472diff --git a/net/core/datagram.c b/net/core/datagram.c
91473index b71423d..0360434 100644
91474--- a/net/core/datagram.c
91475+++ b/net/core/datagram.c
91476@@ -295,7 +295,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
91477 }
91478
91479 kfree_skb(skb);
91480- atomic_inc(&sk->sk_drops);
91481+ atomic_inc_unchecked(&sk->sk_drops);
91482 sk_mem_reclaim_partial(sk);
91483
91484 return err;
91485diff --git a/net/core/dev.c b/net/core/dev.c
91486index 7ddbb31..3902452 100644
91487--- a/net/core/dev.c
91488+++ b/net/core/dev.c
91489@@ -1649,7 +1649,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
91490 {
91491 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
91492 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
91493- atomic_long_inc(&dev->rx_dropped);
91494+ atomic_long_inc_unchecked(&dev->rx_dropped);
91495 kfree_skb(skb);
91496 return NET_RX_DROP;
91497 }
91498@@ -1658,7 +1658,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
91499 skb_orphan(skb);
91500
91501 if (unlikely(!is_skb_forwardable(dev, skb))) {
91502- atomic_long_inc(&dev->rx_dropped);
91503+ atomic_long_inc_unchecked(&dev->rx_dropped);
91504 kfree_skb(skb);
91505 return NET_RX_DROP;
91506 }
91507@@ -2404,7 +2404,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
91508
91509 struct dev_gso_cb {
91510 void (*destructor)(struct sk_buff *skb);
91511-};
91512+} __no_const;
91513
91514 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
91515
91516@@ -3139,7 +3139,7 @@ enqueue:
91517
91518 local_irq_restore(flags);
91519
91520- atomic_long_inc(&skb->dev->rx_dropped);
91521+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
91522 kfree_skb(skb);
91523 return NET_RX_DROP;
91524 }
91525@@ -3211,7 +3211,7 @@ int netif_rx_ni(struct sk_buff *skb)
91526 }
91527 EXPORT_SYMBOL(netif_rx_ni);
91528
91529-static void net_tx_action(struct softirq_action *h)
91530+static void net_tx_action(void)
91531 {
91532 struct softnet_data *sd = &__get_cpu_var(softnet_data);
91533
91534@@ -3545,7 +3545,7 @@ ncls:
91535 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
91536 } else {
91537 drop:
91538- atomic_long_inc(&skb->dev->rx_dropped);
91539+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
91540 kfree_skb(skb);
91541 /* Jamal, now you will not able to escape explaining
91542 * me how you were going to use this. :-)
91543@@ -4153,7 +4153,7 @@ void netif_napi_del(struct napi_struct *napi)
91544 }
91545 EXPORT_SYMBOL(netif_napi_del);
91546
91547-static void net_rx_action(struct softirq_action *h)
91548+static void net_rx_action(void)
91549 {
91550 struct softnet_data *sd = &__get_cpu_var(softnet_data);
91551 unsigned long time_limit = jiffies + 2;
91552@@ -5590,7 +5590,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
91553 } else {
91554 netdev_stats_to_stats64(storage, &dev->stats);
91555 }
91556- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
91557+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
91558 return storage;
91559 }
91560 EXPORT_SYMBOL(dev_get_stats);
91561diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
91562index 5b7d0e1..cb960fc 100644
91563--- a/net/core/dev_ioctl.c
91564+++ b/net/core/dev_ioctl.c
91565@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
91566 if (no_module && capable(CAP_NET_ADMIN))
91567 no_module = request_module("netdev-%s", name);
91568 if (no_module && capable(CAP_SYS_MODULE)) {
91569+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91570+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
91571+#else
91572 if (!request_module("%s", name))
91573 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
91574 name);
91575+#endif
91576 }
91577 }
91578 EXPORT_SYMBOL(dev_load);
91579diff --git a/net/core/ethtool.c b/net/core/ethtool.c
91580index ce91766..3b71cdb 100644
91581--- a/net/core/ethtool.c
91582+++ b/net/core/ethtool.c
91583@@ -1319,10 +1319,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
91584 if (ret)
91585 return ret;
91586
91587- len = (tmp.len > dump.len) ? dump.len : tmp.len;
91588+ len = min(tmp.len, dump.len);
91589 if (!len)
91590 return -EFAULT;
91591
91592+ /* Don't ever let the driver think there's more space available
91593+ * than it requested with .get_dump_flag().
91594+ */
91595+ dump.len = len;
91596+
91597+ /* Always allocate enough space to hold the whole thing so that the
91598+ * driver does not need to check the length and bother with partial
91599+ * dumping.
91600+ */
91601 data = vzalloc(tmp.len);
91602 if (!data)
91603 return -ENOMEM;
91604@@ -1330,6 +1339,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
91605 if (ret)
91606 goto out;
91607
91608+ /* There are two sane possibilities:
91609+ * 1. The driver's .get_dump_data() does not touch dump.len.
91610+ * 2. Or it may set dump.len to how much it really writes, which
91611+ * should be tmp.len (or len if it can do a partial dump).
91612+ * In any case respond to userspace with the actual length of data
91613+ * it's receiving.
91614+ */
91615+ WARN_ON(dump.len != len && dump.len != tmp.len);
91616+ dump.len = len;
91617+
91618 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
91619 ret = -EFAULT;
91620 goto out;
91621diff --git a/net/core/flow.c b/net/core/flow.c
91622index 7102f16..146b4bd 100644
91623--- a/net/core/flow.c
91624+++ b/net/core/flow.c
91625@@ -61,7 +61,7 @@ struct flow_cache {
91626 struct timer_list rnd_timer;
91627 };
91628
91629-atomic_t flow_cache_genid = ATOMIC_INIT(0);
91630+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
91631 EXPORT_SYMBOL(flow_cache_genid);
91632 static struct flow_cache flow_cache_global;
91633 static struct kmem_cache *flow_cachep __read_mostly;
91634@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
91635
91636 static int flow_entry_valid(struct flow_cache_entry *fle)
91637 {
91638- if (atomic_read(&flow_cache_genid) != fle->genid)
91639+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
91640 return 0;
91641 if (fle->object && !fle->object->ops->check(fle->object))
91642 return 0;
91643@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
91644 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
91645 fcp->hash_count++;
91646 }
91647- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
91648+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
91649 flo = fle->object;
91650 if (!flo)
91651 goto ret_object;
91652@@ -279,7 +279,7 @@ nocache:
91653 }
91654 flo = resolver(net, key, family, dir, flo, ctx);
91655 if (fle) {
91656- fle->genid = atomic_read(&flow_cache_genid);
91657+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
91658 if (!IS_ERR(flo))
91659 fle->object = flo;
91660 else
91661diff --git a/net/core/iovec.c b/net/core/iovec.c
91662index de178e4..1dabd8b 100644
91663--- a/net/core/iovec.c
91664+++ b/net/core/iovec.c
91665@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
91666 if (m->msg_namelen) {
91667 if (mode == VERIFY_READ) {
91668 void __user *namep;
91669- namep = (void __user __force *) m->msg_name;
91670+ namep = (void __force_user *) m->msg_name;
91671 err = move_addr_to_kernel(namep, m->msg_namelen,
91672 address);
91673 if (err < 0)
91674@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
91675 }
91676
91677 size = m->msg_iovlen * sizeof(struct iovec);
91678- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
91679+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
91680 return -EFAULT;
91681
91682 m->msg_iov = iov;
91683diff --git a/net/core/neighbour.c b/net/core/neighbour.c
91684index ce90b02..8752627 100644
91685--- a/net/core/neighbour.c
91686+++ b/net/core/neighbour.c
91687@@ -2771,7 +2771,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
91688 size_t *lenp, loff_t *ppos)
91689 {
91690 int size, ret;
91691- ctl_table tmp = *ctl;
91692+ ctl_table_no_const tmp = *ctl;
91693
91694 tmp.extra1 = &zero;
91695 tmp.extra2 = &unres_qlen_max;
91696diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
91697index 569d355..79cf2d0 100644
91698--- a/net/core/net-procfs.c
91699+++ b/net/core/net-procfs.c
91700@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
91701 else
91702 seq_printf(seq, "%04x", ntohs(pt->type));
91703
91704+#ifdef CONFIG_GRKERNSEC_HIDESYM
91705+ seq_printf(seq, " %-8s %pf\n",
91706+ pt->dev ? pt->dev->name : "", NULL);
91707+#else
91708 seq_printf(seq, " %-8s %pf\n",
91709 pt->dev ? pt->dev->name : "", pt->func);
91710+#endif
91711 }
91712
91713 return 0;
91714diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
91715index 981fed3..536af34 100644
91716--- a/net/core/net-sysfs.c
91717+++ b/net/core/net-sysfs.c
91718@@ -1311,7 +1311,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
91719 }
91720 EXPORT_SYMBOL(netdev_class_remove_file);
91721
91722-int netdev_kobject_init(void)
91723+int __init netdev_kobject_init(void)
91724 {
91725 kobj_ns_type_register(&net_ns_type_operations);
91726 return class_register(&net_class);
91727diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
91728index f9765203..9feaef8 100644
91729--- a/net/core/net_namespace.c
91730+++ b/net/core/net_namespace.c
91731@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
91732 int error;
91733 LIST_HEAD(net_exit_list);
91734
91735- list_add_tail(&ops->list, list);
91736+ pax_list_add_tail((struct list_head *)&ops->list, list);
91737 if (ops->init || (ops->id && ops->size)) {
91738 for_each_net(net) {
91739 error = ops_init(ops, net);
91740@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
91741
91742 out_undo:
91743 /* If I have an error cleanup all namespaces I initialized */
91744- list_del(&ops->list);
91745+ pax_list_del((struct list_head *)&ops->list);
91746 ops_exit_list(ops, &net_exit_list);
91747 ops_free_list(ops, &net_exit_list);
91748 return error;
91749@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
91750 struct net *net;
91751 LIST_HEAD(net_exit_list);
91752
91753- list_del(&ops->list);
91754+ pax_list_del((struct list_head *)&ops->list);
91755 for_each_net(net)
91756 list_add_tail(&net->exit_list, &net_exit_list);
91757 ops_exit_list(ops, &net_exit_list);
91758@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
91759 mutex_lock(&net_mutex);
91760 error = register_pernet_operations(&pernet_list, ops);
91761 if (!error && (first_device == &pernet_list))
91762- first_device = &ops->list;
91763+ first_device = (struct list_head *)&ops->list;
91764 mutex_unlock(&net_mutex);
91765 return error;
91766 }
91767diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
91768index a08bd2b..c59bd7c 100644
91769--- a/net/core/rtnetlink.c
91770+++ b/net/core/rtnetlink.c
91771@@ -58,7 +58,7 @@ struct rtnl_link {
91772 rtnl_doit_func doit;
91773 rtnl_dumpit_func dumpit;
91774 rtnl_calcit_func calcit;
91775-};
91776+} __no_const;
91777
91778 static DEFINE_MUTEX(rtnl_mutex);
91779
91780@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
91781 if (rtnl_link_ops_get(ops->kind))
91782 return -EEXIST;
91783
91784- if (!ops->dellink)
91785- ops->dellink = unregister_netdevice_queue;
91786+ if (!ops->dellink) {
91787+ pax_open_kernel();
91788+ *(void **)&ops->dellink = unregister_netdevice_queue;
91789+ pax_close_kernel();
91790+ }
91791
91792- list_add_tail(&ops->list, &link_ops);
91793+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
91794 return 0;
91795 }
91796 EXPORT_SYMBOL_GPL(__rtnl_link_register);
91797@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
91798 for_each_net(net) {
91799 __rtnl_kill_links(net, ops);
91800 }
91801- list_del(&ops->list);
91802+ pax_list_del((struct list_head *)&ops->list);
91803 }
91804 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
91805
91806@@ -2374,7 +2377,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
91807 struct nlattr *extfilt;
91808 u32 filter_mask = 0;
91809
91810- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
91811+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
91812 IFLA_EXT_MASK);
91813 if (extfilt)
91814 filter_mask = nla_get_u32(extfilt);
91815diff --git a/net/core/scm.c b/net/core/scm.c
e2b79cd1 91816index 03795d0..98d6bdb 100644
bb5f0bf8
AF
91817--- a/net/core/scm.c
91818+++ b/net/core/scm.c
e2b79cd1
AF
91819@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
91820 return -EINVAL;
91821
91822 if ((creds->pid == task_tgid_vnr(current) ||
91823- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
91824+ ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
91825 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
91826 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
91827 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
bb5f0bf8
AF
91828@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
91829 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
91830 {
91831 struct cmsghdr __user *cm
91832- = (__force struct cmsghdr __user *)msg->msg_control;
91833+ = (struct cmsghdr __force_user *)msg->msg_control;
91834 struct cmsghdr cmhdr;
91835 int cmlen = CMSG_LEN(len);
91836 int err;
91837@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
91838 err = -EFAULT;
91839 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
91840 goto out;
91841- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
91842+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
91843 goto out;
91844 cmlen = CMSG_SPACE(len);
91845 if (msg->msg_controllen < cmlen)
91846@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
91847 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
91848 {
91849 struct cmsghdr __user *cm
91850- = (__force struct cmsghdr __user*)msg->msg_control;
91851+ = (struct cmsghdr __force_user *)msg->msg_control;
91852
91853 int fdmax = 0;
91854 int fdnum = scm->fp->count;
91855@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
91856 if (fdnum < fdmax)
91857 fdmax = fdnum;
91858
91859- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
91860+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
91861 i++, cmfptr++)
91862 {
91863 struct socket *sock;
91864diff --git a/net/core/skbuff.c b/net/core/skbuff.c
91865index 1c1738c..4cab7f0 100644
91866--- a/net/core/skbuff.c
91867+++ b/net/core/skbuff.c
91868@@ -3087,13 +3087,15 @@ void __init skb_init(void)
91869 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
91870 sizeof(struct sk_buff),
91871 0,
91872- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
91873+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
91874+ SLAB_NO_SANITIZE,
91875 NULL);
91876 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
91877 (2*sizeof(struct sk_buff)) +
91878 sizeof(atomic_t),
91879 0,
91880- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
91881+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
91882+ SLAB_NO_SANITIZE,
91883 NULL);
91884 }
91885
91886diff --git a/net/core/sock.c b/net/core/sock.c
91887index d6d024c..6ea7ab4 100644
91888--- a/net/core/sock.c
91889+++ b/net/core/sock.c
91890@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
91891 struct sk_buff_head *list = &sk->sk_receive_queue;
91892
91893 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
91894- atomic_inc(&sk->sk_drops);
91895+ atomic_inc_unchecked(&sk->sk_drops);
91896 trace_sock_rcvqueue_full(sk, skb);
91897 return -ENOMEM;
91898 }
91899@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
91900 return err;
91901
91902 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
91903- atomic_inc(&sk->sk_drops);
91904+ atomic_inc_unchecked(&sk->sk_drops);
91905 return -ENOBUFS;
91906 }
91907
91908@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
91909 skb_dst_force(skb);
91910
91911 spin_lock_irqsave(&list->lock, flags);
91912- skb->dropcount = atomic_read(&sk->sk_drops);
91913+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
91914 __skb_queue_tail(list, skb);
91915 spin_unlock_irqrestore(&list->lock, flags);
91916
91917@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
91918 skb->dev = NULL;
91919
91920 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
91921- atomic_inc(&sk->sk_drops);
91922+ atomic_inc_unchecked(&sk->sk_drops);
91923 goto discard_and_relse;
91924 }
91925 if (nested)
91926@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
91927 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
91928 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
91929 bh_unlock_sock(sk);
91930- atomic_inc(&sk->sk_drops);
91931+ atomic_inc_unchecked(&sk->sk_drops);
91932 goto discard_and_relse;
91933 }
91934
91935@@ -933,12 +933,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
91936 struct timeval tm;
91937 } v;
91938
91939- int lv = sizeof(int);
91940- int len;
91941+ unsigned int lv = sizeof(int);
91942+ unsigned int len;
91943
91944 if (get_user(len, optlen))
91945 return -EFAULT;
91946- if (len < 0)
91947+ if (len > INT_MAX)
91948 return -EINVAL;
91949
91950 memset(&v, 0, sizeof(v));
91951@@ -1090,11 +1090,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
91952
91953 case SO_PEERNAME:
91954 {
91955- char address[128];
91956+ char address[_K_SS_MAXSIZE];
91957
91958 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
91959 return -ENOTCONN;
91960- if (lv < len)
91961+ if (lv < len || sizeof address < len)
91962 return -EINVAL;
91963 if (copy_to_user(optval, address, len))
91964 return -EFAULT;
91965@@ -1161,7 +1161,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
91966
91967 if (len > lv)
91968 len = lv;
91969- if (copy_to_user(optval, &v, len))
91970+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
91971 return -EFAULT;
91972 lenout:
91973 if (put_user(len, optlen))
91974@@ -2277,7 +2277,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
91975 */
91976 smp_wmb();
91977 atomic_set(&sk->sk_refcnt, 1);
91978- atomic_set(&sk->sk_drops, 0);
91979+ atomic_set_unchecked(&sk->sk_drops, 0);
91980 }
91981 EXPORT_SYMBOL(sock_init_data);
91982
91983diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
91984index a0e9cf6..ef7f9ed 100644
91985--- a/net/core/sock_diag.c
91986+++ b/net/core/sock_diag.c
91987@@ -9,26 +9,33 @@
91988 #include <linux/inet_diag.h>
91989 #include <linux/sock_diag.h>
91990
91991-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
91992+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
91993 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
91994 static DEFINE_MUTEX(sock_diag_table_mutex);
91995
91996 int sock_diag_check_cookie(void *sk, __u32 *cookie)
91997 {
91998+#ifndef CONFIG_GRKERNSEC_HIDESYM
91999 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
92000 cookie[1] != INET_DIAG_NOCOOKIE) &&
92001 ((u32)(unsigned long)sk != cookie[0] ||
92002 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
92003 return -ESTALE;
92004 else
92005+#endif
92006 return 0;
92007 }
92008 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
92009
92010 void sock_diag_save_cookie(void *sk, __u32 *cookie)
92011 {
92012+#ifdef CONFIG_GRKERNSEC_HIDESYM
92013+ cookie[0] = 0;
92014+ cookie[1] = 0;
92015+#else
92016 cookie[0] = (u32)(unsigned long)sk;
92017 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
92018+#endif
92019 }
92020 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
92021
92022@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
92023 mutex_lock(&sock_diag_table_mutex);
92024 if (sock_diag_handlers[hndl->family])
92025 err = -EBUSY;
92026- else
92027+ else {
92028+ pax_open_kernel();
92029 sock_diag_handlers[hndl->family] = hndl;
92030+ pax_close_kernel();
92031+ }
92032 mutex_unlock(&sock_diag_table_mutex);
92033
92034 return err;
92035@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
92036
92037 mutex_lock(&sock_diag_table_mutex);
92038 BUG_ON(sock_diag_handlers[family] != hnld);
92039+ pax_open_kernel();
92040 sock_diag_handlers[family] = NULL;
92041+ pax_close_kernel();
92042 mutex_unlock(&sock_diag_table_mutex);
92043 }
92044 EXPORT_SYMBOL_GPL(sock_diag_unregister);
92045diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
92046index cfdb46a..cef55e1 100644
92047--- a/net/core/sysctl_net_core.c
92048+++ b/net/core/sysctl_net_core.c
92049@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
92050 {
92051 unsigned int orig_size, size;
92052 int ret, i;
92053- ctl_table tmp = {
92054+ ctl_table_no_const tmp = {
92055 .data = &size,
92056 .maxlen = sizeof(size),
92057 .mode = table->mode
92058@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
92059
92060 static __net_init int sysctl_core_net_init(struct net *net)
92061 {
92062- struct ctl_table *tbl;
92063+ ctl_table_no_const *tbl = NULL;
92064
92065 net->core.sysctl_somaxconn = SOMAXCONN;
92066
92067- tbl = netns_core_table;
92068 if (!net_eq(net, &init_net)) {
92069- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
92070+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
92071 if (tbl == NULL)
92072 goto err_dup;
92073
92074@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
92075 if (net->user_ns != &init_user_ns) {
92076 tbl[0].procname = NULL;
92077 }
92078- }
92079-
92080- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
92081+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
92082+ } else
92083+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
92084 if (net->core.sysctl_hdr == NULL)
92085 goto err_reg;
92086
92087 return 0;
92088
92089 err_reg:
92090- if (tbl != netns_core_table)
92091- kfree(tbl);
92092+ kfree(tbl);
92093 err_dup:
92094 return -ENOMEM;
92095 }
92096@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
92097 kfree(tbl);
92098 }
92099
92100-static __net_initdata struct pernet_operations sysctl_core_ops = {
92101+static __net_initconst struct pernet_operations sysctl_core_ops = {
92102 .init = sysctl_core_net_init,
92103 .exit = sysctl_core_net_exit,
92104 };
92105diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
92106index c21f200..bc4565b 100644
92107--- a/net/decnet/af_decnet.c
92108+++ b/net/decnet/af_decnet.c
92109@@ -465,6 +465,7 @@ static struct proto dn_proto = {
92110 .sysctl_rmem = sysctl_decnet_rmem,
92111 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
92112 .obj_size = sizeof(struct dn_sock),
92113+ .slab_flags = SLAB_USERCOPY,
92114 };
92115
92116 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
92117diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
92118index a55eecc..dd8428c 100644
92119--- a/net/decnet/sysctl_net_decnet.c
92120+++ b/net/decnet/sysctl_net_decnet.c
92121@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
92122
92123 if (len > *lenp) len = *lenp;
92124
92125- if (copy_to_user(buffer, addr, len))
92126+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
92127 return -EFAULT;
92128
92129 *lenp = len;
92130@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
92131
92132 if (len > *lenp) len = *lenp;
92133
92134- if (copy_to_user(buffer, devname, len))
92135+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
92136 return -EFAULT;
92137
92138 *lenp = len;
e2b79cd1
AF
92139diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
92140index 55e1fd5..fd602b8 100644
92141--- a/net/ieee802154/6lowpan.c
92142+++ b/net/ieee802154/6lowpan.c
92143@@ -459,7 +459,7 @@ static int lowpan_header_create(struct sk_buff *skb,
92144 hc06_ptr += 3;
92145 } else {
92146 /* compress nothing */
92147- memcpy(hc06_ptr, &hdr, 4);
92148+ memcpy(hc06_ptr, hdr, 4);
92149 /* replace the top byte with new ECN | DSCP format */
92150 *hc06_ptr = tmp;
92151 hc06_ptr += 4;
bb5f0bf8
AF
92152diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
92153index d01be2a..8976537 100644
92154--- a/net/ipv4/af_inet.c
92155+++ b/net/ipv4/af_inet.c
92156@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
92157
92158 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
92159
92160- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
92161- if (!sysctl_local_reserved_ports)
92162- goto out;
92163-
92164 rc = proto_register(&tcp_prot, 1);
92165 if (rc)
92166- goto out_free_reserved_ports;
92167+ goto out;
92168
92169 rc = proto_register(&udp_prot, 1);
92170 if (rc)
92171@@ -1818,8 +1814,6 @@ out_unregister_udp_proto:
92172 proto_unregister(&udp_prot);
92173 out_unregister_tcp_proto:
92174 proto_unregister(&tcp_prot);
92175-out_free_reserved_ports:
92176- kfree(sysctl_local_reserved_ports);
92177 goto out;
92178 }
92179
92180diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
92181index 2e7f194..0fa4d6d 100644
92182--- a/net/ipv4/ah4.c
92183+++ b/net/ipv4/ah4.c
92184@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
92185 return;
92186
92187 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92188- atomic_inc(&flow_cache_genid);
92189+ atomic_inc_unchecked(&flow_cache_genid);
92190 rt_genid_bump(net);
92191
92192 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
92193diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
92194index dfc39d4..0d4fa52 100644
92195--- a/net/ipv4/devinet.c
92196+++ b/net/ipv4/devinet.c
92197@@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
92198 ci = nla_data(tb[IFA_CACHEINFO]);
92199 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
92200 err = -EINVAL;
92201- goto errout;
92202+ goto errout_free;
92203 }
92204 *pvalid_lft = ci->ifa_valid;
92205 *pprefered_lft = ci->ifa_prefered;
92206@@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
92207
92208 return ifa;
92209
92210+errout_free:
92211+ inet_free_ifa(ifa);
92212 errout:
92213 return ERR_PTR(err);
92214 }
92215@@ -1529,7 +1531,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
92216 idx = 0;
92217 head = &net->dev_index_head[h];
92218 rcu_read_lock();
92219- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
92220+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
92221 net->dev_base_seq;
92222 hlist_for_each_entry_rcu(dev, head, index_hlist) {
92223 if (idx < s_idx)
92224@@ -1840,7 +1842,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
92225 idx = 0;
92226 head = &net->dev_index_head[h];
92227 rcu_read_lock();
92228- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
92229+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
92230 net->dev_base_seq;
92231 hlist_for_each_entry_rcu(dev, head, index_hlist) {
92232 if (idx < s_idx)
92233@@ -2065,7 +2067,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
92234 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
92235 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
92236
92237-static struct devinet_sysctl_table {
92238+static const struct devinet_sysctl_table {
92239 struct ctl_table_header *sysctl_header;
92240 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
92241 } devinet_sysctl = {
92242@@ -2183,7 +2185,7 @@ static __net_init int devinet_init_net(struct net *net)
92243 int err;
92244 struct ipv4_devconf *all, *dflt;
92245 #ifdef CONFIG_SYSCTL
92246- struct ctl_table *tbl = ctl_forward_entry;
92247+ ctl_table_no_const *tbl = NULL;
92248 struct ctl_table_header *forw_hdr;
92249 #endif
92250
92251@@ -2201,7 +2203,7 @@ static __net_init int devinet_init_net(struct net *net)
92252 goto err_alloc_dflt;
92253
92254 #ifdef CONFIG_SYSCTL
92255- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
92256+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
92257 if (tbl == NULL)
92258 goto err_alloc_ctl;
92259
92260@@ -2221,7 +2223,10 @@ static __net_init int devinet_init_net(struct net *net)
92261 goto err_reg_dflt;
92262
92263 err = -ENOMEM;
92264- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
92265+ if (!net_eq(net, &init_net))
92266+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
92267+ else
92268+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
92269 if (forw_hdr == NULL)
92270 goto err_reg_ctl;
92271 net->ipv4.forw_hdr = forw_hdr;
92272@@ -2237,8 +2242,7 @@ err_reg_ctl:
92273 err_reg_dflt:
92274 __devinet_sysctl_unregister(all);
92275 err_reg_all:
92276- if (tbl != ctl_forward_entry)
92277- kfree(tbl);
92278+ kfree(tbl);
92279 err_alloc_ctl:
92280 #endif
92281 if (dflt != &ipv4_devconf_dflt)
92282diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
92283index 4cfe34d..d2fac8a 100644
92284--- a/net/ipv4/esp4.c
92285+++ b/net/ipv4/esp4.c
92286@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
92287 }
92288
92289 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
92290- net_adj) & ~(align - 1)) + (net_adj - 2);
92291+ net_adj) & ~(align - 1)) + net_adj - 2;
92292 }
92293
92294 static void esp4_err(struct sk_buff *skb, u32 info)
92295@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
92296 return;
92297
92298 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92299- atomic_inc(&flow_cache_genid);
92300+ atomic_inc_unchecked(&flow_cache_genid);
92301 rt_genid_bump(net);
92302
92303 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
92304diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
92305index c7629a2..b62d139 100644
92306--- a/net/ipv4/fib_frontend.c
92307+++ b/net/ipv4/fib_frontend.c
92308@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
92309 #ifdef CONFIG_IP_ROUTE_MULTIPATH
92310 fib_sync_up(dev);
92311 #endif
92312- atomic_inc(&net->ipv4.dev_addr_genid);
92313+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92314 rt_cache_flush(dev_net(dev));
92315 break;
92316 case NETDEV_DOWN:
92317 fib_del_ifaddr(ifa, NULL);
92318- atomic_inc(&net->ipv4.dev_addr_genid);
92319+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92320 if (ifa->ifa_dev->ifa_list == NULL) {
92321 /* Last address was deleted from this interface.
92322 * Disable IP.
92323@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
92324 #ifdef CONFIG_IP_ROUTE_MULTIPATH
92325 fib_sync_up(dev);
92326 #endif
92327- atomic_inc(&net->ipv4.dev_addr_genid);
92328+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92329 rt_cache_flush(net);
92330 break;
92331 case NETDEV_DOWN:
92332diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
92333index 8f6cb7a..34507f9 100644
92334--- a/net/ipv4/fib_semantics.c
92335+++ b/net/ipv4/fib_semantics.c
92336@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
92337 nh->nh_saddr = inet_select_addr(nh->nh_dev,
92338 nh->nh_gw,
92339 nh->nh_parent->fib_scope);
92340- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
92341+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
92342
92343 return nh->nh_saddr;
92344 }
92345diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
92346index 49616fe..6e8a13d 100644
92347--- a/net/ipv4/fib_trie.c
92348+++ b/net/ipv4/fib_trie.c
92349@@ -71,7 +71,6 @@
92350 #include <linux/init.h>
92351 #include <linux/list.h>
92352 #include <linux/slab.h>
92353-#include <linux/prefetch.h>
92354 #include <linux/export.h>
92355 #include <net/net_namespace.h>
92356 #include <net/ip.h>
92357@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
92358 if (!c)
92359 continue;
92360
92361- if (IS_LEAF(c)) {
92362- prefetch(rcu_dereference_rtnl(p->child[idx]));
92363+ if (IS_LEAF(c))
92364 return (struct leaf *) c;
92365- }
92366
92367 /* Rescan start scanning in new node */
92368 p = (struct tnode *) c;
92369diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
92370index 6acb541..9ea617d 100644
92371--- a/net/ipv4/inet_connection_sock.c
92372+++ b/net/ipv4/inet_connection_sock.c
92373@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
92374 .range = { 32768, 61000 },
92375 };
92376
92377-unsigned long *sysctl_local_reserved_ports;
92378+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
92379 EXPORT_SYMBOL(sysctl_local_reserved_ports);
92380
92381 void inet_get_local_port_range(int *low, int *high)
92382diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
92383index 6af375a..c493c74 100644
92384--- a/net/ipv4/inet_hashtables.c
92385+++ b/net/ipv4/inet_hashtables.c
92386@@ -18,12 +18,15 @@
92387 #include <linux/sched.h>
92388 #include <linux/slab.h>
92389 #include <linux/wait.h>
92390+#include <linux/security.h>
92391
92392 #include <net/inet_connection_sock.h>
92393 #include <net/inet_hashtables.h>
92394 #include <net/secure_seq.h>
92395 #include <net/ip.h>
92396
92397+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
92398+
92399 /*
92400 * Allocate and initialize a new local port bind bucket.
92401 * The bindhash mutex for snum's hash chain must be held here.
92402@@ -554,6 +557,8 @@ ok:
92403 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
92404 spin_unlock(&head->lock);
92405
92406+ gr_update_task_in_ip_table(current, inet_sk(sk));
92407+
92408 if (tw) {
92409 inet_twsk_deschedule(tw, death_row);
92410 while (twrefcnt) {
92411diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
92412index 000e3d2..5472da3 100644
92413--- a/net/ipv4/inetpeer.c
92414+++ b/net/ipv4/inetpeer.c
92415@@ -503,8 +503,8 @@ relookup:
92416 if (p) {
92417 p->daddr = *daddr;
92418 atomic_set(&p->refcnt, 1);
92419- atomic_set(&p->rid, 0);
92420- atomic_set(&p->ip_id_count,
92421+ atomic_set_unchecked(&p->rid, 0);
92422+ atomic_set_unchecked(&p->ip_id_count,
92423 (daddr->family == AF_INET) ?
92424 secure_ip_id(daddr->addr.a4) :
92425 secure_ipv6_id(daddr->addr.a6));
92426diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
92427index b66910a..cfe416e 100644
92428--- a/net/ipv4/ip_fragment.c
92429+++ b/net/ipv4/ip_fragment.c
92430@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
92431 return 0;
92432
92433 start = qp->rid;
92434- end = atomic_inc_return(&peer->rid);
92435+ end = atomic_inc_return_unchecked(&peer->rid);
92436 qp->rid = end;
92437
92438 rc = qp->q.fragments && (end - start) > max;
92439@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
92440
92441 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92442 {
92443- struct ctl_table *table;
92444+ ctl_table_no_const *table = NULL;
92445 struct ctl_table_header *hdr;
92446
92447- table = ip4_frags_ns_ctl_table;
92448 if (!net_eq(net, &init_net)) {
92449- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
92450+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
92451 if (table == NULL)
92452 goto err_alloc;
92453
92454@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92455 /* Don't export sysctls to unprivileged users */
92456 if (net->user_ns != &init_user_ns)
92457 table[0].procname = NULL;
92458- }
92459+ hdr = register_net_sysctl(net, "net/ipv4", table);
92460+ } else
92461+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
92462
92463- hdr = register_net_sysctl(net, "net/ipv4", table);
92464 if (hdr == NULL)
92465 goto err_reg;
92466
92467@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92468 return 0;
92469
92470 err_reg:
92471- if (!net_eq(net, &init_net))
92472- kfree(table);
92473+ kfree(table);
92474 err_alloc:
92475 return -ENOMEM;
92476 }
92477diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
92478index 855004f..9644112 100644
92479--- a/net/ipv4/ip_gre.c
92480+++ b/net/ipv4/ip_gre.c
92481@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
92482 module_param(log_ecn_error, bool, 0644);
92483 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
92484
92485-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
92486+static struct rtnl_link_ops ipgre_link_ops;
92487 static int ipgre_tunnel_init(struct net_device *dev);
92488
92489 static int ipgre_net_id __read_mostly;
92490@@ -572,7 +572,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
92491 if (daddr)
92492 memcpy(&iph->daddr, daddr, 4);
92493 if (iph->daddr)
92494- return t->hlen;
92495+ return t->hlen + sizeof(*iph);
92496
92497 return -(t->hlen + sizeof(*iph));
92498 }
92499@@ -919,7 +919,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
92500 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
92501 };
92502
92503-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
92504+static struct rtnl_link_ops ipgre_link_ops = {
92505 .kind = "gre",
92506 .maxtype = IFLA_GRE_MAX,
92507 .policy = ipgre_policy,
92508@@ -933,7 +933,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
92509 .fill_info = ipgre_fill_info,
92510 };
92511
92512-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
92513+static struct rtnl_link_ops ipgre_tap_ops = {
92514 .kind = "gretap",
92515 .maxtype = IFLA_GRE_MAX,
92516 .policy = ipgre_policy,
92517diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
92518index d9c4f11..02b82dbc 100644
92519--- a/net/ipv4/ip_sockglue.c
92520+++ b/net/ipv4/ip_sockglue.c
92521@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
92522 len = min_t(unsigned int, len, opt->optlen);
92523 if (put_user(len, optlen))
92524 return -EFAULT;
92525- if (copy_to_user(optval, opt->__data, len))
92526+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
92527+ copy_to_user(optval, opt->__data, len))
92528 return -EFAULT;
92529 return 0;
92530 }
92531@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
92532 if (sk->sk_type != SOCK_STREAM)
92533 return -ENOPROTOOPT;
92534
92535- msg.msg_control = optval;
92536+ msg.msg_control = (void __force_kernel *)optval;
92537 msg.msg_controllen = len;
92538 msg.msg_flags = flags;
92539
92540diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
92541index 17cc0ff..63856c4 100644
92542--- a/net/ipv4/ip_vti.c
92543+++ b/net/ipv4/ip_vti.c
92544@@ -47,7 +47,7 @@
92545 #define HASH_SIZE 16
92546 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
92547
92548-static struct rtnl_link_ops vti_link_ops __read_mostly;
92549+static struct rtnl_link_ops vti_link_ops;
92550
92551 static int vti_net_id __read_mostly;
92552 struct vti_net {
92553@@ -840,7 +840,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
92554 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
92555 };
92556
92557-static struct rtnl_link_ops vti_link_ops __read_mostly = {
92558+static struct rtnl_link_ops vti_link_ops = {
92559 .kind = "vti",
92560 .maxtype = IFLA_VTI_MAX,
92561 .policy = vti_policy,
92562diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
92563index 59cb8c7..a72160c 100644
92564--- a/net/ipv4/ipcomp.c
92565+++ b/net/ipv4/ipcomp.c
92566@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
92567 return;
92568
92569 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92570- atomic_inc(&flow_cache_genid);
92571+ atomic_inc_unchecked(&flow_cache_genid);
92572 rt_genid_bump(net);
92573
92574 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
92575diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
92576index efa1138..20dbba0 100644
92577--- a/net/ipv4/ipconfig.c
92578+++ b/net/ipv4/ipconfig.c
92579@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
92580
92581 mm_segment_t oldfs = get_fs();
92582 set_fs(get_ds());
92583- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
92584+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
92585 set_fs(oldfs);
92586 return res;
92587 }
92588@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
92589
92590 mm_segment_t oldfs = get_fs();
92591 set_fs(get_ds());
92592- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
92593+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
92594 set_fs(oldfs);
92595 return res;
92596 }
92597@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
92598
92599 mm_segment_t oldfs = get_fs();
92600 set_fs(get_ds());
92601- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
92602+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
92603 set_fs(oldfs);
92604 return res;
92605 }
92606diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
92607index 7cfc456..e726868 100644
92608--- a/net/ipv4/ipip.c
92609+++ b/net/ipv4/ipip.c
92610@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
92611 static int ipip_net_id __read_mostly;
92612
92613 static int ipip_tunnel_init(struct net_device *dev);
92614-static struct rtnl_link_ops ipip_link_ops __read_mostly;
92615+static struct rtnl_link_ops ipip_link_ops;
92616
92617 static int ipip_err(struct sk_buff *skb, u32 info)
92618 {
92619@@ -406,7 +406,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
92620 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
92621 };
92622
92623-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
92624+static struct rtnl_link_ops ipip_link_ops = {
92625 .kind = "ipip",
92626 .maxtype = IFLA_IPTUN_MAX,
92627 .policy = ipip_policy,
92628diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
92629index 85a4f21..1beb1f5 100644
92630--- a/net/ipv4/netfilter/arp_tables.c
92631+++ b/net/ipv4/netfilter/arp_tables.c
92632@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
92633 #endif
92634
92635 static int get_info(struct net *net, void __user *user,
92636- const int *len, int compat)
92637+ int len, int compat)
92638 {
92639 char name[XT_TABLE_MAXNAMELEN];
92640 struct xt_table *t;
92641 int ret;
92642
92643- if (*len != sizeof(struct arpt_getinfo)) {
92644- duprintf("length %u != %Zu\n", *len,
92645+ if (len != sizeof(struct arpt_getinfo)) {
92646+ duprintf("length %u != %Zu\n", len,
92647 sizeof(struct arpt_getinfo));
92648 return -EINVAL;
92649 }
92650@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
92651 info.size = private->size;
92652 strcpy(info.name, name);
92653
92654- if (copy_to_user(user, &info, *len) != 0)
92655+ if (copy_to_user(user, &info, len) != 0)
92656 ret = -EFAULT;
92657 else
92658 ret = 0;
92659@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
92660
92661 switch (cmd) {
92662 case ARPT_SO_GET_INFO:
92663- ret = get_info(sock_net(sk), user, len, 1);
92664+ ret = get_info(sock_net(sk), user, *len, 1);
92665 break;
92666 case ARPT_SO_GET_ENTRIES:
92667 ret = compat_get_entries(sock_net(sk), user, len);
92668@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
92669
92670 switch (cmd) {
92671 case ARPT_SO_GET_INFO:
92672- ret = get_info(sock_net(sk), user, len, 0);
92673+ ret = get_info(sock_net(sk), user, *len, 0);
92674 break;
92675
92676 case ARPT_SO_GET_ENTRIES:
92677diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
92678index d23118d..6ad7277 100644
92679--- a/net/ipv4/netfilter/ip_tables.c
92680+++ b/net/ipv4/netfilter/ip_tables.c
92681@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
92682 #endif
92683
92684 static int get_info(struct net *net, void __user *user,
92685- const int *len, int compat)
92686+ int len, int compat)
92687 {
92688 char name[XT_TABLE_MAXNAMELEN];
92689 struct xt_table *t;
92690 int ret;
92691
92692- if (*len != sizeof(struct ipt_getinfo)) {
92693- duprintf("length %u != %zu\n", *len,
92694+ if (len != sizeof(struct ipt_getinfo)) {
92695+ duprintf("length %u != %zu\n", len,
92696 sizeof(struct ipt_getinfo));
92697 return -EINVAL;
92698 }
92699@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
92700 info.size = private->size;
92701 strcpy(info.name, name);
92702
92703- if (copy_to_user(user, &info, *len) != 0)
92704+ if (copy_to_user(user, &info, len) != 0)
92705 ret = -EFAULT;
92706 else
92707 ret = 0;
92708@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92709
92710 switch (cmd) {
92711 case IPT_SO_GET_INFO:
92712- ret = get_info(sock_net(sk), user, len, 1);
92713+ ret = get_info(sock_net(sk), user, *len, 1);
92714 break;
92715 case IPT_SO_GET_ENTRIES:
92716 ret = compat_get_entries(sock_net(sk), user, len);
92717@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92718
92719 switch (cmd) {
92720 case IPT_SO_GET_INFO:
92721- ret = get_info(sock_net(sk), user, len, 0);
92722+ ret = get_info(sock_net(sk), user, *len, 0);
92723 break;
92724
92725 case IPT_SO_GET_ENTRIES:
92726diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
92727index 7d93d62..cbbf2a3 100644
92728--- a/net/ipv4/ping.c
92729+++ b/net/ipv4/ping.c
92730@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
92731 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
92732 0, sock_i_ino(sp),
92733 atomic_read(&sp->sk_refcnt), sp,
92734- atomic_read(&sp->sk_drops), len);
92735+ atomic_read_unchecked(&sp->sk_drops), len);
92736 }
92737
92738 static int ping_seq_show(struct seq_file *seq, void *v)
92739diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
92740index dd44e0a..06dcca4 100644
92741--- a/net/ipv4/raw.c
92742+++ b/net/ipv4/raw.c
92743@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
92744 int raw_rcv(struct sock *sk, struct sk_buff *skb)
92745 {
92746 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
92747- atomic_inc(&sk->sk_drops);
92748+ atomic_inc_unchecked(&sk->sk_drops);
92749 kfree_skb(skb);
92750 return NET_RX_DROP;
92751 }
92752@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
92753
92754 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
92755 {
92756+ struct icmp_filter filter;
92757+
92758 if (optlen > sizeof(struct icmp_filter))
92759 optlen = sizeof(struct icmp_filter);
92760- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
92761+ if (copy_from_user(&filter, optval, optlen))
92762 return -EFAULT;
92763+ raw_sk(sk)->filter = filter;
92764 return 0;
92765 }
92766
92767 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
92768 {
92769 int len, ret = -EFAULT;
92770+ struct icmp_filter filter;
92771
92772 if (get_user(len, optlen))
92773 goto out;
92774@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
92775 if (len > sizeof(struct icmp_filter))
92776 len = sizeof(struct icmp_filter);
92777 ret = -EFAULT;
92778- if (put_user(len, optlen) ||
92779- copy_to_user(optval, &raw_sk(sk)->filter, len))
92780+ filter = raw_sk(sk)->filter;
92781+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
92782 goto out;
92783 ret = 0;
92784 out: return ret;
92785@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
92786 0, 0L, 0,
92787 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
92788 0, sock_i_ino(sp),
92789- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
92790+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
92791 }
92792
92793 static int raw_seq_show(struct seq_file *seq, void *v)
92794diff --git a/net/ipv4/route.c b/net/ipv4/route.c
92795index d35bbf0..faa3ab8 100644
92796--- a/net/ipv4/route.c
92797+++ b/net/ipv4/route.c
92798@@ -2558,34 +2558,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
92799 .maxlen = sizeof(int),
92800 .mode = 0200,
92801 .proc_handler = ipv4_sysctl_rtcache_flush,
92802+ .extra1 = &init_net,
92803 },
92804 { },
92805 };
92806
92807 static __net_init int sysctl_route_net_init(struct net *net)
92808 {
92809- struct ctl_table *tbl;
92810+ ctl_table_no_const *tbl = NULL;
92811
92812- tbl = ipv4_route_flush_table;
92813 if (!net_eq(net, &init_net)) {
92814- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
92815+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
92816 if (tbl == NULL)
92817 goto err_dup;
92818
92819 /* Don't export sysctls to unprivileged users */
92820 if (net->user_ns != &init_user_ns)
92821 tbl[0].procname = NULL;
92822- }
92823- tbl[0].extra1 = net;
92824+ tbl[0].extra1 = net;
92825+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
92826+ } else
92827+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
92828
92829- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
92830 if (net->ipv4.route_hdr == NULL)
92831 goto err_reg;
92832 return 0;
92833
92834 err_reg:
92835- if (tbl != ipv4_route_flush_table)
92836- kfree(tbl);
92837+ kfree(tbl);
92838 err_dup:
92839 return -ENOMEM;
92840 }
92841@@ -2608,7 +2608,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
92842
92843 static __net_init int rt_genid_init(struct net *net)
92844 {
92845- atomic_set(&net->rt_genid, 0);
92846+ atomic_set_unchecked(&net->rt_genid, 0);
92847 get_random_bytes(&net->ipv4.dev_addr_genid,
92848 sizeof(net->ipv4.dev_addr_genid));
92849 return 0;
92850diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
92851index 3f25e75..3ae0f4d 100644
92852--- a/net/ipv4/sysctl_net_ipv4.c
92853+++ b/net/ipv4/sysctl_net_ipv4.c
92854@@ -57,7 +57,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
92855 {
92856 int ret;
92857 int range[2];
92858- ctl_table tmp = {
92859+ ctl_table_no_const tmp = {
92860 .data = &range,
92861 .maxlen = sizeof(range),
92862 .mode = table->mode,
92863@@ -110,7 +110,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
92864 int ret;
92865 gid_t urange[2];
92866 kgid_t low, high;
92867- ctl_table tmp = {
92868+ ctl_table_no_const tmp = {
92869 .data = &urange,
92870 .maxlen = sizeof(urange),
92871 .mode = table->mode,
92872@@ -141,7 +141,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
92873 void __user *buffer, size_t *lenp, loff_t *ppos)
92874 {
92875 char val[TCP_CA_NAME_MAX];
92876- ctl_table tbl = {
92877+ ctl_table_no_const tbl = {
92878 .data = val,
92879 .maxlen = TCP_CA_NAME_MAX,
92880 };
92881@@ -160,7 +160,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
92882 void __user *buffer, size_t *lenp,
92883 loff_t *ppos)
92884 {
92885- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
92886+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
92887 int ret;
92888
92889 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
92890@@ -177,7 +177,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
92891 void __user *buffer, size_t *lenp,
92892 loff_t *ppos)
92893 {
92894- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
92895+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
92896 int ret;
92897
92898 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
92899@@ -203,15 +203,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
92900 struct mem_cgroup *memcg;
92901 #endif
92902
92903- ctl_table tmp = {
92904+ ctl_table_no_const tmp = {
92905 .data = &vec,
92906 .maxlen = sizeof(vec),
92907 .mode = ctl->mode,
92908 };
92909
92910 if (!write) {
92911- ctl->data = &net->ipv4.sysctl_tcp_mem;
92912- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
92913+ ctl_table_no_const tcp_mem = *ctl;
92914+
92915+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
92916+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
92917 }
92918
92919 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
92920@@ -238,7 +240,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
92921 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
92922 size_t *lenp, loff_t *ppos)
92923 {
92924- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
92925+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
92926 struct tcp_fastopen_context *ctxt;
92927 int ret;
92928 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
92929@@ -481,7 +483,7 @@ static struct ctl_table ipv4_table[] = {
92930 },
92931 {
92932 .procname = "ip_local_reserved_ports",
92933- .data = NULL, /* initialized in sysctl_ipv4_init */
92934+ .data = sysctl_local_reserved_ports,
92935 .maxlen = 65536,
92936 .mode = 0644,
92937 .proc_handler = proc_do_large_bitmap,
92938@@ -846,11 +848,10 @@ static struct ctl_table ipv4_net_table[] = {
92939
92940 static __net_init int ipv4_sysctl_init_net(struct net *net)
92941 {
92942- struct ctl_table *table;
92943+ ctl_table_no_const *table = NULL;
92944
92945- table = ipv4_net_table;
92946 if (!net_eq(net, &init_net)) {
92947- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
92948+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
92949 if (table == NULL)
92950 goto err_alloc;
92951
92952@@ -885,15 +886,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
92953
92954 tcp_init_mem(net);
92955
92956- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
92957+ if (!net_eq(net, &init_net))
92958+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
92959+ else
92960+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
92961 if (net->ipv4.ipv4_hdr == NULL)
92962 goto err_reg;
92963
92964 return 0;
92965
92966 err_reg:
92967- if (!net_eq(net, &init_net))
92968- kfree(table);
92969+ kfree(table);
92970 err_alloc:
92971 return -ENOMEM;
92972 }
92973@@ -915,16 +918,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
92974 static __init int sysctl_ipv4_init(void)
92975 {
92976 struct ctl_table_header *hdr;
92977- struct ctl_table *i;
92978-
92979- for (i = ipv4_table; i->procname; i++) {
92980- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
92981- i->data = sysctl_local_reserved_ports;
92982- break;
92983- }
92984- }
92985- if (!i->procname)
92986- return -EINVAL;
92987
92988 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
92989 if (hdr == NULL)
92990diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
92991index 9c62257..651cc27 100644
92992--- a/net/ipv4/tcp_input.c
92993+++ b/net/ipv4/tcp_input.c
92994@@ -4436,7 +4436,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
92995 * simplifies code)
92996 */
92997 static void
92998-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
92999+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
93000 struct sk_buff *head, struct sk_buff *tail,
93001 u32 start, u32 end)
93002 {
93003@@ -5522,6 +5522,7 @@ discard:
93004 tcp_paws_reject(&tp->rx_opt, 0))
93005 goto discard_and_undo;
93006
93007+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
93008 if (th->syn) {
93009 /* We see SYN without ACK. It is attempt of
93010 * simultaneous connect with crossed SYNs.
93011@@ -5572,6 +5573,7 @@ discard:
93012 goto discard;
93013 #endif
93014 }
93015+#endif
93016 /* "fifth, if neither of the SYN or RST bits is set then
93017 * drop the segment and return."
93018 */
93019@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
93020 goto discard;
93021
93022 if (th->syn) {
93023- if (th->fin)
93024+ if (th->fin || th->urg || th->psh)
93025 goto discard;
93026 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
93027 return 1;
93028diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
93029index 7999fc5..c812f42 100644
93030--- a/net/ipv4/tcp_ipv4.c
93031+++ b/net/ipv4/tcp_ipv4.c
93032@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
93033 EXPORT_SYMBOL(sysctl_tcp_low_latency);
93034
93035
93036+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93037+extern int grsec_enable_blackhole;
93038+#endif
93039+
93040 #ifdef CONFIG_TCP_MD5SIG
93041 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
93042 __be32 daddr, __be32 saddr, const struct tcphdr *th);
93043@@ -1855,6 +1859,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
93044 return 0;
93045
93046 reset:
93047+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93048+ if (!grsec_enable_blackhole)
93049+#endif
93050 tcp_v4_send_reset(rsk, skb);
93051 discard:
93052 kfree_skb(skb);
93053@@ -2000,12 +2007,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
93054 TCP_SKB_CB(skb)->sacked = 0;
93055
93056 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
93057- if (!sk)
93058+ if (!sk) {
93059+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93060+ ret = 1;
93061+#endif
93062 goto no_tcp_socket;
93063-
93064+ }
93065 process:
93066- if (sk->sk_state == TCP_TIME_WAIT)
93067+ if (sk->sk_state == TCP_TIME_WAIT) {
93068+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93069+ ret = 2;
93070+#endif
93071 goto do_time_wait;
93072+ }
93073
93074 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
93075 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
93076@@ -2058,6 +2072,10 @@ csum_error:
93077 bad_packet:
93078 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
93079 } else {
93080+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93081+ if (!grsec_enable_blackhole || (ret == 1 &&
93082+ (skb->dev->flags & IFF_LOOPBACK)))
93083+#endif
93084 tcp_v4_send_reset(NULL, skb);
93085 }
93086
93087diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
93088index 0f01788..d52a859 100644
93089--- a/net/ipv4/tcp_minisocks.c
93090+++ b/net/ipv4/tcp_minisocks.c
93091@@ -27,6 +27,10 @@
93092 #include <net/inet_common.h>
93093 #include <net/xfrm.h>
93094
93095+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93096+extern int grsec_enable_blackhole;
93097+#endif
93098+
93099 int sysctl_tcp_syncookies __read_mostly = 1;
93100 EXPORT_SYMBOL(sysctl_tcp_syncookies);
93101
93102@@ -717,7 +721,10 @@ embryonic_reset:
93103 * avoid becoming vulnerable to outside attack aiming at
93104 * resetting legit local connections.
93105 */
93106- req->rsk_ops->send_reset(sk, skb);
93107+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93108+ if (!grsec_enable_blackhole)
93109+#endif
93110+ req->rsk_ops->send_reset(sk, skb);
93111 } else if (fastopen) { /* received a valid RST pkt */
93112 reqsk_fastopen_remove(sk, req, true);
93113 tcp_reset(sk);
93114diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
93115index d4943f6..e7a74a5 100644
93116--- a/net/ipv4/tcp_probe.c
93117+++ b/net/ipv4/tcp_probe.c
93118@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
93119 if (cnt + width >= len)
93120 break;
93121
93122- if (copy_to_user(buf + cnt, tbuf, width))
93123+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
93124 return -EFAULT;
93125 cnt += width;
93126 }
93127diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
93128index 4b85e6f..22f9ac9 100644
93129--- a/net/ipv4/tcp_timer.c
93130+++ b/net/ipv4/tcp_timer.c
93131@@ -22,6 +22,10 @@
93132 #include <linux/gfp.h>
93133 #include <net/tcp.h>
93134
93135+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93136+extern int grsec_lastack_retries;
93137+#endif
93138+
93139 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
93140 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
93141 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
93142@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
93143 }
93144 }
93145
93146+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93147+ if ((sk->sk_state == TCP_LAST_ACK) &&
93148+ (grsec_lastack_retries > 0) &&
93149+ (grsec_lastack_retries < retry_until))
93150+ retry_until = grsec_lastack_retries;
93151+#endif
93152+
93153 if (retransmits_timed_out(sk, retry_until,
93154 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
93155 /* Has it gone just too far? */
93156diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
93157index 93b731d..5a2dd92 100644
93158--- a/net/ipv4/udp.c
93159+++ b/net/ipv4/udp.c
93160@@ -87,6 +87,7 @@
93161 #include <linux/types.h>
93162 #include <linux/fcntl.h>
93163 #include <linux/module.h>
93164+#include <linux/security.h>
93165 #include <linux/socket.h>
93166 #include <linux/sockios.h>
93167 #include <linux/igmp.h>
93168@@ -111,6 +112,10 @@
93169 #include <trace/events/skb.h>
93170 #include "udp_impl.h"
93171
93172+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93173+extern int grsec_enable_blackhole;
93174+#endif
93175+
93176 struct udp_table udp_table __read_mostly;
93177 EXPORT_SYMBOL(udp_table);
93178
93179@@ -594,6 +599,9 @@ found:
93180 return s;
93181 }
93182
93183+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
93184+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
93185+
93186 /*
93187 * This routine is called by the ICMP module when it gets some
93188 * sort of error condition. If err < 0 then the socket should
93189@@ -890,9 +898,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
93190 dport = usin->sin_port;
93191 if (dport == 0)
93192 return -EINVAL;
93193+
93194+ err = gr_search_udp_sendmsg(sk, usin);
93195+ if (err)
93196+ return err;
93197 } else {
93198 if (sk->sk_state != TCP_ESTABLISHED)
93199 return -EDESTADDRREQ;
93200+
93201+ err = gr_search_udp_sendmsg(sk, NULL);
93202+ if (err)
93203+ return err;
93204+
93205 daddr = inet->inet_daddr;
93206 dport = inet->inet_dport;
93207 /* Open fast path for connected socket.
93208@@ -1136,7 +1153,7 @@ static unsigned int first_packet_length(struct sock *sk)
93209 IS_UDPLITE(sk));
93210 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
93211 IS_UDPLITE(sk));
93212- atomic_inc(&sk->sk_drops);
93213+ atomic_inc_unchecked(&sk->sk_drops);
93214 __skb_unlink(skb, rcvq);
93215 __skb_queue_tail(&list_kill, skb);
93216 }
93217@@ -1222,6 +1239,10 @@ try_again:
93218 if (!skb)
93219 goto out;
93220
93221+ err = gr_search_udp_recvmsg(sk, skb);
93222+ if (err)
93223+ goto out_free;
93224+
93225 ulen = skb->len - sizeof(struct udphdr);
93226 copied = len;
93227 if (copied > ulen)
93228@@ -1255,7 +1276,7 @@ try_again:
93229 if (unlikely(err)) {
93230 trace_kfree_skb(skb, udp_recvmsg);
93231 if (!peeked) {
93232- atomic_inc(&sk->sk_drops);
93233+ atomic_inc_unchecked(&sk->sk_drops);
93234 UDP_INC_STATS_USER(sock_net(sk),
93235 UDP_MIB_INERRORS, is_udplite);
93236 }
93237@@ -1542,7 +1563,7 @@ csum_error:
93238 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
93239 drop:
93240 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
93241- atomic_inc(&sk->sk_drops);
93242+ atomic_inc_unchecked(&sk->sk_drops);
93243 kfree_skb(skb);
93244 return -1;
93245 }
93246@@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
93247 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
93248
93249 if (!skb1) {
93250- atomic_inc(&sk->sk_drops);
93251+ atomic_inc_unchecked(&sk->sk_drops);
93252 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
93253 IS_UDPLITE(sk));
93254 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
93255@@ -1730,6 +1751,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
93256 goto csum_error;
93257
93258 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
93259+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93260+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
93261+#endif
93262 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
93263
93264 /*
93265@@ -2160,7 +2184,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
93266 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
93267 0, sock_i_ino(sp),
93268 atomic_read(&sp->sk_refcnt), sp,
93269- atomic_read(&sp->sk_drops), len);
93270+ atomic_read_unchecked(&sp->sk_drops), len);
93271 }
93272
93273 int udp4_seq_show(struct seq_file *seq, void *v)
93274diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
93275index 9a459be..086b866 100644
93276--- a/net/ipv4/xfrm4_policy.c
93277+++ b/net/ipv4/xfrm4_policy.c
93278@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
93279
93280 static int __net_init xfrm4_net_init(struct net *net)
93281 {
93282- struct ctl_table *table;
93283+ ctl_table_no_const *table = NULL;
93284 struct ctl_table_header *hdr;
93285
93286- table = xfrm4_policy_table;
93287 if (!net_eq(net, &init_net)) {
93288- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
93289+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
93290 if (!table)
93291 goto err_alloc;
93292
93293 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
93294- }
93295-
93296- hdr = register_net_sysctl(net, "net/ipv4", table);
93297+ hdr = register_net_sysctl(net, "net/ipv4", table);
93298+ } else
93299+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
93300 if (!hdr)
93301 goto err_reg;
93302
93303@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
93304 return 0;
93305
93306 err_reg:
93307- if (!net_eq(net, &init_net))
93308- kfree(table);
93309+ kfree(table);
93310 err_alloc:
93311 return -ENOMEM;
93312 }
93313diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
e2b79cd1 93314index fb8c94c..80a31d8 100644
bb5f0bf8
AF
93315--- a/net/ipv6/addrconf.c
93316+++ b/net/ipv6/addrconf.c
93317@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
93318 idx = 0;
93319 head = &net->dev_index_head[h];
93320 rcu_read_lock();
93321- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
93322+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
93323 net->dev_base_seq;
93324 hlist_for_each_entry_rcu(dev, head, index_hlist) {
93325 if (idx < s_idx)
e2b79cd1
AF
93326@@ -1124,12 +1124,10 @@ retry:
93327 if (ifp->flags & IFA_F_OPTIMISTIC)
93328 addr_flags |= IFA_F_OPTIMISTIC;
93329
93330- ift = !max_addresses ||
93331- ipv6_count_addresses(idev) < max_addresses ?
93332- ipv6_add_addr(idev, &addr, tmp_plen,
93333- ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
93334- addr_flags) : NULL;
93335- if (IS_ERR_OR_NULL(ift)) {
93336+ ift = ipv6_add_addr(idev, &addr, tmp_plen,
93337+ ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
93338+ addr_flags);
93339+ if (IS_ERR(ift)) {
93340 in6_ifa_put(ifp);
93341 in6_dev_put(idev);
93342 pr_info("%s: retry temporary address regeneration\n", __func__);
93343@@ -2380,7 +2378,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
bb5f0bf8
AF
93344 p.iph.ihl = 5;
93345 p.iph.protocol = IPPROTO_IPV6;
93346 p.iph.ttl = 64;
93347- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
93348+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
93349
93350 if (ops->ndo_do_ioctl) {
93351 mm_segment_t oldfs = get_fs();
e2b79cd1 93352@@ -4002,7 +4000,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
bb5f0bf8
AF
93353 s_ip_idx = ip_idx = cb->args[2];
93354
93355 rcu_read_lock();
93356- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
93357+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
93358 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
93359 idx = 0;
93360 head = &net->dev_index_head[h];
e2b79cd1 93361@@ -4587,7 +4585,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
bb5f0bf8
AF
93362 dst_free(&ifp->rt->dst);
93363 break;
93364 }
93365- atomic_inc(&net->ipv6.dev_addr_genid);
93366+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
93367 }
93368
93369 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
e2b79cd1 93370@@ -4607,7 +4605,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
bb5f0bf8
AF
93371 int *valp = ctl->data;
93372 int val = *valp;
93373 loff_t pos = *ppos;
93374- ctl_table lctl;
93375+ ctl_table_no_const lctl;
93376 int ret;
93377
93378 /*
e2b79cd1 93379@@ -4689,7 +4687,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
bb5f0bf8
AF
93380 int *valp = ctl->data;
93381 int val = *valp;
93382 loff_t pos = *ppos;
93383- ctl_table lctl;
93384+ ctl_table_no_const lctl;
93385 int ret;
93386
93387 /*
93388diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
93389index 40ffd72..aeac0dc 100644
93390--- a/net/ipv6/esp6.c
93391+++ b/net/ipv6/esp6.c
93392@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
93393 net_adj = 0;
93394
93395 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
93396- net_adj) & ~(align - 1)) + (net_adj - 2);
93397+ net_adj) & ~(align - 1)) + net_adj - 2;
93398 }
93399
93400 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
93401diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
93402index b4ff0a4..db9b764 100644
93403--- a/net/ipv6/icmp.c
93404+++ b/net/ipv6/icmp.c
93405@@ -980,7 +980,7 @@ ctl_table ipv6_icmp_table_template[] = {
93406
93407 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
93408 {
93409- struct ctl_table *table;
93410+ ctl_table_no_const *table;
93411
93412 table = kmemdup(ipv6_icmp_table_template,
93413 sizeof(ipv6_icmp_table_template),
93414diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
93415index ecd6073..58162ae 100644
93416--- a/net/ipv6/ip6_gre.c
93417+++ b/net/ipv6/ip6_gre.c
93418@@ -74,7 +74,7 @@ struct ip6gre_net {
93419 struct net_device *fb_tunnel_dev;
93420 };
93421
93422-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
93423+static struct rtnl_link_ops ip6gre_link_ops;
93424 static int ip6gre_tunnel_init(struct net_device *dev);
93425 static void ip6gre_tunnel_setup(struct net_device *dev);
93426 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
93427@@ -1283,7 +1283,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
93428 }
93429
93430
93431-static struct inet6_protocol ip6gre_protocol __read_mostly = {
93432+static struct inet6_protocol ip6gre_protocol = {
93433 .handler = ip6gre_rcv,
93434 .err_handler = ip6gre_err,
93435 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
93436@@ -1617,7 +1617,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
93437 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
93438 };
93439
93440-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
93441+static struct rtnl_link_ops ip6gre_link_ops = {
93442 .kind = "ip6gre",
93443 .maxtype = IFLA_GRE_MAX,
93444 .policy = ip6gre_policy,
93445@@ -1630,7 +1630,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
93446 .fill_info = ip6gre_fill_info,
93447 };
93448
93449-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
93450+static struct rtnl_link_ops ip6gre_tap_ops = {
93451 .kind = "ip6gretap",
93452 .maxtype = IFLA_GRE_MAX,
93453 .policy = ip6gre_policy,
93454diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
93455index 1e55866..b398dab 100644
93456--- a/net/ipv6/ip6_tunnel.c
93457+++ b/net/ipv6/ip6_tunnel.c
93458@@ -88,7 +88,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
93459
93460 static int ip6_tnl_dev_init(struct net_device *dev);
93461 static void ip6_tnl_dev_setup(struct net_device *dev);
93462-static struct rtnl_link_ops ip6_link_ops __read_mostly;
93463+static struct rtnl_link_ops ip6_link_ops;
93464
93465 static int ip6_tnl_net_id __read_mostly;
93466 struct ip6_tnl_net {
93467@@ -1672,7 +1672,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
93468 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
93469 };
93470
93471-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
93472+static struct rtnl_link_ops ip6_link_ops = {
93473 .kind = "ip6tnl",
93474 .maxtype = IFLA_IPTUN_MAX,
93475 .policy = ip6_tnl_policy,
93476diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
93477index d1e2e8e..51c19ae 100644
93478--- a/net/ipv6/ipv6_sockglue.c
93479+++ b/net/ipv6/ipv6_sockglue.c
93480@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
93481 if (sk->sk_type != SOCK_STREAM)
93482 return -ENOPROTOOPT;
93483
93484- msg.msg_control = optval;
93485+ msg.msg_control = (void __force_kernel *)optval;
93486 msg.msg_controllen = len;
93487 msg.msg_flags = flags;
93488
93489diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
93490index 44400c2..8e11f52 100644
93491--- a/net/ipv6/netfilter/ip6_tables.c
93492+++ b/net/ipv6/netfilter/ip6_tables.c
93493@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
93494 #endif
93495
93496 static int get_info(struct net *net, void __user *user,
93497- const int *len, int compat)
93498+ int len, int compat)
93499 {
93500 char name[XT_TABLE_MAXNAMELEN];
93501 struct xt_table *t;
93502 int ret;
93503
93504- if (*len != sizeof(struct ip6t_getinfo)) {
93505- duprintf("length %u != %zu\n", *len,
93506+ if (len != sizeof(struct ip6t_getinfo)) {
93507+ duprintf("length %u != %zu\n", len,
93508 sizeof(struct ip6t_getinfo));
93509 return -EINVAL;
93510 }
93511@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
93512 info.size = private->size;
93513 strcpy(info.name, name);
93514
93515- if (copy_to_user(user, &info, *len) != 0)
93516+ if (copy_to_user(user, &info, len) != 0)
93517 ret = -EFAULT;
93518 else
93519 ret = 0;
93520@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
93521
93522 switch (cmd) {
93523 case IP6T_SO_GET_INFO:
93524- ret = get_info(sock_net(sk), user, len, 1);
93525+ ret = get_info(sock_net(sk), user, *len, 1);
93526 break;
93527 case IP6T_SO_GET_ENTRIES:
93528 ret = compat_get_entries(sock_net(sk), user, len);
93529@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
93530
93531 switch (cmd) {
93532 case IP6T_SO_GET_INFO:
93533- ret = get_info(sock_net(sk), user, len, 0);
93534+ ret = get_info(sock_net(sk), user, *len, 0);
93535 break;
93536
93537 case IP6T_SO_GET_ENTRIES:
93538diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
93539index dffdc1a..ccc6678 100644
93540--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
93541+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
93542@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
93543
93544 static int nf_ct_frag6_sysctl_register(struct net *net)
93545 {
93546- struct ctl_table *table;
93547+ ctl_table_no_const *table = NULL;
93548 struct ctl_table_header *hdr;
93549
93550- table = nf_ct_frag6_sysctl_table;
93551 if (!net_eq(net, &init_net)) {
93552- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
93553+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
93554 GFP_KERNEL);
93555 if (table == NULL)
93556 goto err_alloc;
93557@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
93558 table[0].data = &net->nf_frag.frags.timeout;
93559 table[1].data = &net->nf_frag.frags.low_thresh;
93560 table[2].data = &net->nf_frag.frags.high_thresh;
93561- }
93562-
93563- hdr = register_net_sysctl(net, "net/netfilter", table);
93564+ hdr = register_net_sysctl(net, "net/netfilter", table);
93565+ } else
93566+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
93567 if (hdr == NULL)
93568 goto err_reg;
93569
93570@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
93571 return 0;
93572
93573 err_reg:
93574- if (!net_eq(net, &init_net))
93575- kfree(table);
93576+ kfree(table);
93577 err_alloc:
93578 return -ENOMEM;
93579 }
93580diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
e2b79cd1 93581index eedff8c..7d7e24a 100644
bb5f0bf8
AF
93582--- a/net/ipv6/raw.c
93583+++ b/net/ipv6/raw.c
e2b79cd1
AF
93584@@ -108,7 +108,7 @@ found:
93585 */
93586 static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
93587 {
93588- struct icmp6hdr *_hdr;
93589+ struct icmp6hdr _hdr;
93590 const struct icmp6hdr *hdr;
93591
93592 hdr = skb_header_pointer(skb, skb_transport_offset(skb),
bb5f0bf8
AF
93593@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
93594 {
93595 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
93596 skb_checksum_complete(skb)) {
93597- atomic_inc(&sk->sk_drops);
93598+ atomic_inc_unchecked(&sk->sk_drops);
93599 kfree_skb(skb);
93600 return NET_RX_DROP;
93601 }
93602@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
93603 struct raw6_sock *rp = raw6_sk(sk);
93604
93605 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
93606- atomic_inc(&sk->sk_drops);
93607+ atomic_inc_unchecked(&sk->sk_drops);
93608 kfree_skb(skb);
93609 return NET_RX_DROP;
93610 }
93611@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
93612
93613 if (inet->hdrincl) {
93614 if (skb_checksum_complete(skb)) {
93615- atomic_inc(&sk->sk_drops);
93616+ atomic_inc_unchecked(&sk->sk_drops);
93617 kfree_skb(skb);
93618 return NET_RX_DROP;
93619 }
93620@@ -602,7 +602,7 @@ out:
93621 return err;
93622 }
93623
93624-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
93625+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
93626 struct flowi6 *fl6, struct dst_entry **dstp,
93627 unsigned int flags)
93628 {
93629@@ -914,12 +914,15 @@ do_confirm:
93630 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
93631 char __user *optval, int optlen)
93632 {
93633+ struct icmp6_filter filter;
93634+
93635 switch (optname) {
93636 case ICMPV6_FILTER:
93637 if (optlen > sizeof(struct icmp6_filter))
93638 optlen = sizeof(struct icmp6_filter);
93639- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
93640+ if (copy_from_user(&filter, optval, optlen))
93641 return -EFAULT;
93642+ raw6_sk(sk)->filter = filter;
93643 return 0;
93644 default:
93645 return -ENOPROTOOPT;
93646@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
93647 char __user *optval, int __user *optlen)
93648 {
93649 int len;
93650+ struct icmp6_filter filter;
93651
93652 switch (optname) {
93653 case ICMPV6_FILTER:
93654@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
93655 len = sizeof(struct icmp6_filter);
93656 if (put_user(len, optlen))
93657 return -EFAULT;
93658- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
93659+ filter = raw6_sk(sk)->filter;
93660+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
93661 return -EFAULT;
93662 return 0;
93663 default:
93664@@ -1251,7 +1256,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
93665 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
93666 0,
93667 sock_i_ino(sp),
93668- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
93669+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
93670 }
93671
93672 static int raw6_seq_show(struct seq_file *seq, void *v)
93673diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
93674index 790d9f4..68ae078 100644
93675--- a/net/ipv6/reassembly.c
93676+++ b/net/ipv6/reassembly.c
93677@@ -621,12 +621,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
93678
93679 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93680 {
93681- struct ctl_table *table;
93682+ ctl_table_no_const *table = NULL;
93683 struct ctl_table_header *hdr;
93684
93685- table = ip6_frags_ns_ctl_table;
93686 if (!net_eq(net, &init_net)) {
93687- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
93688+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
93689 if (table == NULL)
93690 goto err_alloc;
93691
93692@@ -637,9 +636,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93693 /* Don't export sysctls to unprivileged users */
93694 if (net->user_ns != &init_user_ns)
93695 table[0].procname = NULL;
93696- }
93697+ hdr = register_net_sysctl(net, "net/ipv6", table);
93698+ } else
93699+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
93700
93701- hdr = register_net_sysctl(net, "net/ipv6", table);
93702 if (hdr == NULL)
93703 goto err_reg;
93704
93705@@ -647,8 +647,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93706 return 0;
93707
93708 err_reg:
93709- if (!net_eq(net, &init_net))
93710- kfree(table);
93711+ kfree(table);
93712 err_alloc:
93713 return -ENOMEM;
93714 }
93715diff --git a/net/ipv6/route.c b/net/ipv6/route.c
93716index bacce6c..9d1741a 100644
93717--- a/net/ipv6/route.c
93718+++ b/net/ipv6/route.c
93719@@ -2903,7 +2903,7 @@ ctl_table ipv6_route_table_template[] = {
93720
93721 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
93722 {
93723- struct ctl_table *table;
93724+ ctl_table_no_const *table;
93725
93726 table = kmemdup(ipv6_route_table_template,
93727 sizeof(ipv6_route_table_template),
93728diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
93729index 60df36d..f3ab7c8 100644
93730--- a/net/ipv6/sit.c
93731+++ b/net/ipv6/sit.c
93732@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
93733 static void ipip6_dev_free(struct net_device *dev);
93734 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
93735 __be32 *v4dst);
93736-static struct rtnl_link_ops sit_link_ops __read_mostly;
93737+static struct rtnl_link_ops sit_link_ops;
93738
93739 static int sit_net_id __read_mostly;
93740 struct sit_net {
93741@@ -1453,7 +1453,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
93742 #endif
93743 };
93744
93745-static struct rtnl_link_ops sit_link_ops __read_mostly = {
93746+static struct rtnl_link_ops sit_link_ops = {
93747 .kind = "sit",
93748 .maxtype = IFLA_IPTUN_MAX,
93749 .policy = ipip6_policy,
93750diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
93751index e85c48b..b8268d3 100644
93752--- a/net/ipv6/sysctl_net_ipv6.c
93753+++ b/net/ipv6/sysctl_net_ipv6.c
93754@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
93755
93756 static int __net_init ipv6_sysctl_net_init(struct net *net)
93757 {
93758- struct ctl_table *ipv6_table;
93759+ ctl_table_no_const *ipv6_table;
93760 struct ctl_table *ipv6_route_table;
93761 struct ctl_table *ipv6_icmp_table;
93762 int err;
93763diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
93764index 0a17ed9..2526cc3 100644
93765--- a/net/ipv6/tcp_ipv6.c
93766+++ b/net/ipv6/tcp_ipv6.c
93767@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93768 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
93769 }
93770
93771+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93772+extern int grsec_enable_blackhole;
93773+#endif
93774+
93775 static void tcp_v6_hash(struct sock *sk)
93776 {
93777 if (sk->sk_state != TCP_CLOSE) {
93778@@ -1398,6 +1402,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
93779 return 0;
93780
93781 reset:
93782+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93783+ if (!grsec_enable_blackhole)
93784+#endif
93785 tcp_v6_send_reset(sk, skb);
93786 discard:
93787 if (opt_skb)
93788@@ -1480,12 +1487,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
93789 TCP_SKB_CB(skb)->sacked = 0;
93790
93791 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
93792- if (!sk)
93793+ if (!sk) {
93794+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93795+ ret = 1;
93796+#endif
93797 goto no_tcp_socket;
93798+ }
93799
93800 process:
93801- if (sk->sk_state == TCP_TIME_WAIT)
93802+ if (sk->sk_state == TCP_TIME_WAIT) {
93803+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93804+ ret = 2;
93805+#endif
93806 goto do_time_wait;
93807+ }
93808
93809 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
93810 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
93811@@ -1536,6 +1551,10 @@ csum_error:
93812 bad_packet:
93813 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
93814 } else {
93815+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93816+ if (!grsec_enable_blackhole || (ret == 1 &&
93817+ (skb->dev->flags & IFF_LOOPBACK)))
93818+#endif
93819 tcp_v6_send_reset(NULL, skb);
93820 }
93821
93822diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
93823index e7b28f9..d09c290 100644
93824--- a/net/ipv6/udp.c
93825+++ b/net/ipv6/udp.c
93826@@ -52,6 +52,10 @@
93827 #include <trace/events/skb.h>
93828 #include "udp_impl.h"
93829
93830+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93831+extern int grsec_enable_blackhole;
93832+#endif
93833+
93834 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
93835 {
93836 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
93837@@ -419,7 +423,7 @@ try_again:
93838 if (unlikely(err)) {
93839 trace_kfree_skb(skb, udpv6_recvmsg);
93840 if (!peeked) {
93841- atomic_inc(&sk->sk_drops);
93842+ atomic_inc_unchecked(&sk->sk_drops);
93843 if (is_udp4)
93844 UDP_INC_STATS_USER(sock_net(sk),
93845 UDP_MIB_INERRORS,
93846@@ -665,7 +669,7 @@ csum_error:
93847 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
93848 drop:
93849 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
93850- atomic_inc(&sk->sk_drops);
93851+ atomic_inc_unchecked(&sk->sk_drops);
93852 kfree_skb(skb);
93853 return -1;
93854 }
93855@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
93856 if (likely(skb1 == NULL))
93857 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
93858 if (!skb1) {
93859- atomic_inc(&sk->sk_drops);
93860+ atomic_inc_unchecked(&sk->sk_drops);
93861 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
93862 IS_UDPLITE(sk));
93863 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
93864@@ -860,6 +864,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
93865 goto csum_error;
93866
93867 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
93868+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93869+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
93870+#endif
93871 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
93872
93873 kfree_skb(skb);
93874@@ -1392,7 +1399,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
93875 0,
93876 sock_i_ino(sp),
93877 atomic_read(&sp->sk_refcnt), sp,
93878- atomic_read(&sp->sk_drops));
93879+ atomic_read_unchecked(&sp->sk_drops));
93880 }
93881
93882 int udp6_seq_show(struct seq_file *seq, void *v)
93883diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
93884index 23ed03d..465a71d 100644
93885--- a/net/ipv6/xfrm6_policy.c
93886+++ b/net/ipv6/xfrm6_policy.c
93887@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
93888
93889 static int __net_init xfrm6_net_init(struct net *net)
93890 {
93891- struct ctl_table *table;
93892+ ctl_table_no_const *table = NULL;
93893 struct ctl_table_header *hdr;
93894
93895- table = xfrm6_policy_table;
93896 if (!net_eq(net, &init_net)) {
93897- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
93898+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
93899 if (!table)
93900 goto err_alloc;
93901
93902 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
93903- }
93904+ hdr = register_net_sysctl(net, "net/ipv6", table);
93905+ } else
93906+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
93907
93908- hdr = register_net_sysctl(net, "net/ipv6", table);
93909 if (!hdr)
93910 goto err_reg;
93911
93912@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
93913 return 0;
93914
93915 err_reg:
93916- if (!net_eq(net, &init_net))
93917- kfree(table);
93918+ kfree(table);
93919 err_alloc:
93920 return -ENOMEM;
93921 }
93922diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
93923index 41ac7938..75e3bb1 100644
93924--- a/net/irda/ircomm/ircomm_tty.c
93925+++ b/net/irda/ircomm/ircomm_tty.c
93926@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
93927 add_wait_queue(&port->open_wait, &wait);
93928
93929 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
93930- __FILE__, __LINE__, tty->driver->name, port->count);
93931+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
93932
93933 spin_lock_irqsave(&port->lock, flags);
93934 if (!tty_hung_up_p(filp))
93935- port->count--;
93936+ atomic_dec(&port->count);
93937 port->blocked_open++;
93938 spin_unlock_irqrestore(&port->lock, flags);
93939
93940@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
93941 }
93942
93943 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
93944- __FILE__, __LINE__, tty->driver->name, port->count);
93945+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
93946
93947 schedule();
93948 }
93949@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
93950
93951 spin_lock_irqsave(&port->lock, flags);
93952 if (!tty_hung_up_p(filp))
93953- port->count++;
93954+ atomic_inc(&port->count);
93955 port->blocked_open--;
93956 spin_unlock_irqrestore(&port->lock, flags);
93957
93958 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
93959- __FILE__, __LINE__, tty->driver->name, port->count);
93960+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
93961
93962 if (!retval)
93963 port->flags |= ASYNC_NORMAL_ACTIVE;
93964@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
93965
93966 /* ++ is not atomic, so this should be protected - Jean II */
93967 spin_lock_irqsave(&self->port.lock, flags);
93968- self->port.count++;
93969+ atomic_inc(&self->port.count);
93970 spin_unlock_irqrestore(&self->port.lock, flags);
93971 tty_port_tty_set(&self->port, tty);
93972
93973 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
93974- self->line, self->port.count);
93975+ self->line, atomic_read(&self->port.count));
93976
93977 /* Not really used by us, but lets do it anyway */
93978 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
93979@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
93980 tty_kref_put(port->tty);
93981 }
93982 port->tty = NULL;
93983- port->count = 0;
93984+ atomic_set(&port->count, 0);
93985 spin_unlock_irqrestore(&port->lock, flags);
93986
93987 wake_up_interruptible(&port->open_wait);
93988@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
93989 seq_putc(m, '\n');
93990
93991 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
93992- seq_printf(m, "Open count: %d\n", self->port.count);
93993+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
93994 seq_printf(m, "Max data size: %d\n", self->max_data_size);
93995 seq_printf(m, "Max header size: %d\n", self->max_header_size);
93996
93997diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
93998index ae69165..c8b82d8 100644
93999--- a/net/iucv/af_iucv.c
94000+++ b/net/iucv/af_iucv.c
94001@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
94002
94003 write_lock_bh(&iucv_sk_list.lock);
94004
94005- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
94006+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
94007 while (__iucv_get_sock_by_name(name)) {
94008 sprintf(name, "%08x",
94009- atomic_inc_return(&iucv_sk_list.autobind_name));
94010+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
94011 }
94012
94013 write_unlock_bh(&iucv_sk_list.lock);
94014diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
94015index 4fe76ff..426a904 100644
94016--- a/net/iucv/iucv.c
94017+++ b/net/iucv/iucv.c
94018@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
94019 return NOTIFY_OK;
94020 }
94021
94022-static struct notifier_block __refdata iucv_cpu_notifier = {
94023+static struct notifier_block iucv_cpu_notifier = {
94024 .notifier_call = iucv_cpu_notify,
94025 };
94026
94027diff --git a/net/key/af_key.c b/net/key/af_key.c
94028index ab8bd2c..cd2d641 100644
94029--- a/net/key/af_key.c
94030+++ b/net/key/af_key.c
94031@@ -3048,10 +3048,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
94032 static u32 get_acqseq(void)
94033 {
94034 u32 res;
94035- static atomic_t acqseq;
94036+ static atomic_unchecked_t acqseq;
94037
94038 do {
94039- res = atomic_inc_return(&acqseq);
94040+ res = atomic_inc_return_unchecked(&acqseq);
94041 } while (!res);
94042 return res;
94043 }
94044diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
94045index ae36f8e..09d42ac 100644
94046--- a/net/mac80211/cfg.c
94047+++ b/net/mac80211/cfg.c
94048@@ -806,7 +806,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
94049 ret = ieee80211_vif_use_channel(sdata, chandef,
94050 IEEE80211_CHANCTX_EXCLUSIVE);
94051 }
94052- } else if (local->open_count == local->monitors) {
94053+ } else if (local_read(&local->open_count) == local->monitors) {
94054 local->_oper_chandef = *chandef;
94055 ieee80211_hw_config(local, 0);
94056 }
94057@@ -2922,7 +2922,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
94058 else
94059 local->probe_req_reg--;
94060
94061- if (!local->open_count)
94062+ if (!local_read(&local->open_count))
94063 break;
94064
94065 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
94066@@ -3385,8 +3385,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
94067 if (chanctx_conf) {
94068 *chandef = chanctx_conf->def;
94069 ret = 0;
94070- } else if (local->open_count > 0 &&
94071- local->open_count == local->monitors &&
94072+ } else if (local_read(&local->open_count) > 0 &&
94073+ local_read(&local->open_count) == local->monitors &&
94074 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
94075 if (local->use_chanctx)
94076 *chandef = local->monitor_chandef;
94077diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
94078index 9ca8e32..48e4a9b 100644
94079--- a/net/mac80211/ieee80211_i.h
94080+++ b/net/mac80211/ieee80211_i.h
94081@@ -28,6 +28,7 @@
94082 #include <net/ieee80211_radiotap.h>
94083 #include <net/cfg80211.h>
94084 #include <net/mac80211.h>
94085+#include <asm/local.h>
94086 #include "key.h"
94087 #include "sta_info.h"
94088 #include "debug.h"
94089@@ -891,7 +892,7 @@ struct ieee80211_local {
94090 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
94091 spinlock_t queue_stop_reason_lock;
94092
94093- int open_count;
94094+ local_t open_count;
94095 int monitors, cooked_mntrs;
94096 /* number of interfaces with corresponding FIF_ flags */
94097 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
94098diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
94099index 514e90f..56f22bf 100644
94100--- a/net/mac80211/iface.c
94101+++ b/net/mac80211/iface.c
94102@@ -502,7 +502,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94103 break;
94104 }
94105
94106- if (local->open_count == 0) {
94107+ if (local_read(&local->open_count) == 0) {
94108 res = drv_start(local);
94109 if (res)
94110 goto err_del_bss;
94111@@ -545,7 +545,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94112 break;
94113 }
94114
94115- if (local->monitors == 0 && local->open_count == 0) {
94116+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
94117 res = ieee80211_add_virtual_monitor(local);
94118 if (res)
94119 goto err_stop;
94120@@ -653,7 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94121 atomic_inc(&local->iff_promiscs);
94122
94123 if (coming_up)
94124- local->open_count++;
94125+ local_inc(&local->open_count);
94126
94127 if (hw_reconf_flags)
94128 ieee80211_hw_config(local, hw_reconf_flags);
94129@@ -691,7 +691,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94130 err_del_interface:
94131 drv_remove_interface(local, sdata);
94132 err_stop:
94133- if (!local->open_count)
94134+ if (!local_read(&local->open_count))
94135 drv_stop(local);
94136 err_del_bss:
94137 sdata->bss = NULL;
94138@@ -828,7 +828,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94139 }
94140
94141 if (going_down)
94142- local->open_count--;
94143+ local_dec(&local->open_count);
94144
94145 switch (sdata->vif.type) {
94146 case NL80211_IFTYPE_AP_VLAN:
94147@@ -895,7 +895,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94148 }
94149 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
94150
94151- if (local->open_count == 0)
94152+ if (local_read(&local->open_count) == 0)
94153 ieee80211_clear_tx_pending(local);
94154
94155 /*
94156@@ -931,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94157
94158 ieee80211_recalc_ps(local, -1);
94159
94160- if (local->open_count == 0) {
94161+ if (local_read(&local->open_count) == 0) {
94162 ieee80211_stop_device(local);
94163
94164 /* no reconfiguring after stop! */
94165@@ -942,7 +942,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94166 ieee80211_configure_filter(local);
94167 ieee80211_hw_config(local, hw_reconf_flags);
94168
94169- if (local->monitors == local->open_count)
94170+ if (local->monitors == local_read(&local->open_count))
94171 ieee80211_add_virtual_monitor(local);
94172 }
94173
94174diff --git a/net/mac80211/main.c b/net/mac80211/main.c
94175index 8a7bfc4..4407cd0 100644
94176--- a/net/mac80211/main.c
94177+++ b/net/mac80211/main.c
94178@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
94179 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
94180 IEEE80211_CONF_CHANGE_POWER);
94181
94182- if (changed && local->open_count) {
94183+ if (changed && local_read(&local->open_count)) {
94184 ret = drv_config(local, changed);
94185 /*
94186 * Goal:
94187diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
94188index 3401262..d5cd68d 100644
94189--- a/net/mac80211/pm.c
94190+++ b/net/mac80211/pm.c
94191@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94192 struct ieee80211_sub_if_data *sdata;
94193 struct sta_info *sta;
94194
94195- if (!local->open_count)
94196+ if (!local_read(&local->open_count))
94197 goto suspend;
94198
94199 ieee80211_scan_cancel(local);
94200@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94201 cancel_work_sync(&local->dynamic_ps_enable_work);
94202 del_timer_sync(&local->dynamic_ps_timer);
94203
94204- local->wowlan = wowlan && local->open_count;
94205+ local->wowlan = wowlan && local_read(&local->open_count);
94206 if (local->wowlan) {
94207 int err = drv_suspend(local, wowlan);
94208 if (err < 0) {
94209@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94210 WARN_ON(!list_empty(&local->chanctx_list));
94211
94212 /* stop hardware - this must stop RX */
94213- if (local->open_count)
94214+ if (local_read(&local->open_count))
94215 ieee80211_stop_device(local);
94216
94217 suspend:
94218diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
94219index a02bef3..f2f38dd 100644
94220--- a/net/mac80211/rate.c
94221+++ b/net/mac80211/rate.c
94222@@ -712,7 +712,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
94223
94224 ASSERT_RTNL();
94225
94226- if (local->open_count)
94227+ if (local_read(&local->open_count))
94228 return -EBUSY;
94229
94230 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
94231diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
94232index c97a065..ff61928 100644
94233--- a/net/mac80211/rc80211_pid_debugfs.c
94234+++ b/net/mac80211/rc80211_pid_debugfs.c
94235@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
94236
94237 spin_unlock_irqrestore(&events->lock, status);
94238
94239- if (copy_to_user(buf, pb, p))
94240+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
94241 return -EFAULT;
94242
94243 return p;
94244diff --git a/net/mac80211/util.c b/net/mac80211/util.c
94245index 72e6292..e6319eb 100644
94246--- a/net/mac80211/util.c
94247+++ b/net/mac80211/util.c
94248@@ -1472,7 +1472,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
94249 }
94250 #endif
94251 /* everything else happens only if HW was up & running */
94252- if (!local->open_count)
94253+ if (!local_read(&local->open_count))
94254 goto wake_up;
94255
94256 /*
94257@@ -1696,7 +1696,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
94258 local->in_reconfig = false;
94259 barrier();
94260
94261- if (local->monitors == local->open_count && local->monitors > 0)
94262+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
94263 ieee80211_add_virtual_monitor(local);
94264
94265 /*
94266diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
94267index 56d22ca..87c778f 100644
94268--- a/net/netfilter/Kconfig
94269+++ b/net/netfilter/Kconfig
94270@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
94271
94272 To compile it as a module, choose M here. If unsure, say N.
94273
94274+config NETFILTER_XT_MATCH_GRADM
94275+ tristate '"gradm" match support'
94276+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
94277+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
94278+ ---help---
94279+ The gradm match allows to match on grsecurity RBAC being enabled.
94280+ It is useful when iptables rules are applied early on bootup to
94281+ prevent connections to the machine (except from a trusted host)
94282+ while the RBAC system is disabled.
94283+
94284 config NETFILTER_XT_MATCH_HASHLIMIT
94285 tristate '"hashlimit" match support'
94286 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
94287diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
94288index a1abf87..dbcb7ee 100644
94289--- a/net/netfilter/Makefile
94290+++ b/net/netfilter/Makefile
94291@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
94292 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
94293 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
94294 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
94295+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
94296 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
94297 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
94298 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
94299diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
94300index f771390..145b765 100644
94301--- a/net/netfilter/ipset/ip_set_core.c
94302+++ b/net/netfilter/ipset/ip_set_core.c
94303@@ -1820,7 +1820,7 @@ done:
94304 return ret;
94305 }
94306
94307-static struct nf_sockopt_ops so_set __read_mostly = {
94308+static struct nf_sockopt_ops so_set = {
94309 .pf = PF_INET,
94310 .get_optmin = SO_IP_SET,
94311 .get_optmax = SO_IP_SET + 1,
94312diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
94313index a083bda..da661c3 100644
94314--- a/net/netfilter/ipvs/ip_vs_conn.c
94315+++ b/net/netfilter/ipvs/ip_vs_conn.c
94316@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
94317 /* Increase the refcnt counter of the dest */
94318 ip_vs_dest_hold(dest);
94319
94320- conn_flags = atomic_read(&dest->conn_flags);
94321+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
94322 if (cp->protocol != IPPROTO_UDP)
94323 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
94324 flags = cp->flags;
94325@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
94326
94327 cp->control = NULL;
94328 atomic_set(&cp->n_control, 0);
94329- atomic_set(&cp->in_pkts, 0);
94330+ atomic_set_unchecked(&cp->in_pkts, 0);
94331
94332 cp->packet_xmit = NULL;
94333 cp->app = NULL;
94334@@ -1190,7 +1190,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
94335
94336 /* Don't drop the entry if its number of incoming packets is not
94337 located in [0, 8] */
94338- i = atomic_read(&cp->in_pkts);
94339+ i = atomic_read_unchecked(&cp->in_pkts);
94340 if (i > 8 || i < 0) return 0;
94341
94342 if (!todrop_rate[i]) return 0;
94343diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
94344index 23b8eb5..48a8959 100644
94345--- a/net/netfilter/ipvs/ip_vs_core.c
94346+++ b/net/netfilter/ipvs/ip_vs_core.c
94347@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
94348 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
94349 /* do not touch skb anymore */
94350
94351- atomic_inc(&cp->in_pkts);
94352+ atomic_inc_unchecked(&cp->in_pkts);
94353 ip_vs_conn_put(cp);
94354 return ret;
94355 }
94356@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
94357 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
94358 pkts = sysctl_sync_threshold(ipvs);
94359 else
94360- pkts = atomic_add_return(1, &cp->in_pkts);
94361+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94362
94363 if (ipvs->sync_state & IP_VS_STATE_MASTER)
94364 ip_vs_sync_conn(net, cp, pkts);
94365diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
94366index 9e6c2a0..28552e2 100644
94367--- a/net/netfilter/ipvs/ip_vs_ctl.c
94368+++ b/net/netfilter/ipvs/ip_vs_ctl.c
94369@@ -789,7 +789,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
94370 */
94371 ip_vs_rs_hash(ipvs, dest);
94372 }
94373- atomic_set(&dest->conn_flags, conn_flags);
94374+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
94375
94376 /* bind the service */
94377 if (!dest->svc) {
94378@@ -1657,7 +1657,7 @@ proc_do_sync_ports(ctl_table *table, int write,
94379 * align with netns init in ip_vs_control_net_init()
94380 */
94381
94382-static struct ctl_table vs_vars[] = {
94383+static ctl_table_no_const vs_vars[] __read_only = {
94384 {
94385 .procname = "amemthresh",
94386 .maxlen = sizeof(int),
94387@@ -2060,7 +2060,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
94388 " %-7s %-6d %-10d %-10d\n",
94389 &dest->addr.in6,
94390 ntohs(dest->port),
94391- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
94392+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
94393 atomic_read(&dest->weight),
94394 atomic_read(&dest->activeconns),
94395 atomic_read(&dest->inactconns));
94396@@ -2071,7 +2071,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
94397 "%-7s %-6d %-10d %-10d\n",
94398 ntohl(dest->addr.ip),
94399 ntohs(dest->port),
94400- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
94401+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
94402 atomic_read(&dest->weight),
94403 atomic_read(&dest->activeconns),
94404 atomic_read(&dest->inactconns));
94405@@ -2549,7 +2549,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
94406
94407 entry.addr = dest->addr.ip;
94408 entry.port = dest->port;
94409- entry.conn_flags = atomic_read(&dest->conn_flags);
94410+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
94411 entry.weight = atomic_read(&dest->weight);
94412 entry.u_threshold = dest->u_threshold;
94413 entry.l_threshold = dest->l_threshold;
94414@@ -3092,7 +3092,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
94415 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
94416 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
94417 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
94418- (atomic_read(&dest->conn_flags) &
94419+ (atomic_read_unchecked(&dest->conn_flags) &
94420 IP_VS_CONN_F_FWD_MASK)) ||
94421 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
94422 atomic_read(&dest->weight)) ||
94423@@ -3682,7 +3682,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
94424 {
94425 int idx;
94426 struct netns_ipvs *ipvs = net_ipvs(net);
94427- struct ctl_table *tbl;
94428+ ctl_table_no_const *tbl;
94429
94430 atomic_set(&ipvs->dropentry, 0);
94431 spin_lock_init(&ipvs->dropentry_lock);
94432diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
94433index 5ea26bd..c9bc65f 100644
94434--- a/net/netfilter/ipvs/ip_vs_lblc.c
94435+++ b/net/netfilter/ipvs/ip_vs_lblc.c
94436@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
94437 * IPVS LBLC sysctl table
94438 */
94439 #ifdef CONFIG_SYSCTL
94440-static ctl_table vs_vars_table[] = {
94441+static ctl_table_no_const vs_vars_table[] __read_only = {
94442 {
94443 .procname = "lblc_expiration",
94444 .data = NULL,
94445diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
94446index 50123c2..067c773 100644
94447--- a/net/netfilter/ipvs/ip_vs_lblcr.c
94448+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
94449@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
94450 * IPVS LBLCR sysctl table
94451 */
94452
94453-static ctl_table vs_vars_table[] = {
94454+static ctl_table_no_const vs_vars_table[] __read_only = {
94455 {
94456 .procname = "lblcr_expiration",
94457 .data = NULL,
94458diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
94459index f6046d9..4f10cfd 100644
94460--- a/net/netfilter/ipvs/ip_vs_sync.c
94461+++ b/net/netfilter/ipvs/ip_vs_sync.c
94462@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
94463 cp = cp->control;
94464 if (cp) {
94465 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
94466- pkts = atomic_add_return(1, &cp->in_pkts);
94467+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94468 else
94469 pkts = sysctl_sync_threshold(ipvs);
94470 ip_vs_sync_conn(net, cp->control, pkts);
94471@@ -758,7 +758,7 @@ control:
94472 if (!cp)
94473 return;
94474 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
94475- pkts = atomic_add_return(1, &cp->in_pkts);
94476+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94477 else
94478 pkts = sysctl_sync_threshold(ipvs);
94479 goto sloop;
94480@@ -882,7 +882,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
94481
94482 if (opt)
94483 memcpy(&cp->in_seq, opt, sizeof(*opt));
94484- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
94485+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
94486 cp->state = state;
94487 cp->old_state = cp->state;
94488 /*
94489diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
94490index b75ff64..0c51bbe 100644
94491--- a/net/netfilter/ipvs/ip_vs_xmit.c
94492+++ b/net/netfilter/ipvs/ip_vs_xmit.c
94493@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
94494 else
94495 rc = NF_ACCEPT;
94496 /* do not touch skb anymore */
94497- atomic_inc(&cp->in_pkts);
94498+ atomic_inc_unchecked(&cp->in_pkts);
94499 goto out;
94500 }
94501
94502@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
94503 else
94504 rc = NF_ACCEPT;
94505 /* do not touch skb anymore */
94506- atomic_inc(&cp->in_pkts);
94507+ atomic_inc_unchecked(&cp->in_pkts);
94508 goto out;
94509 }
94510
94511diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
94512index 2d3030a..7ba1c0a 100644
94513--- a/net/netfilter/nf_conntrack_acct.c
94514+++ b/net/netfilter/nf_conntrack_acct.c
94515@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
94516 #ifdef CONFIG_SYSCTL
94517 static int nf_conntrack_acct_init_sysctl(struct net *net)
94518 {
94519- struct ctl_table *table;
94520+ ctl_table_no_const *table;
94521
94522 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
94523 GFP_KERNEL);
94524diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
94525index 0283bae..5febcb0 100644
94526--- a/net/netfilter/nf_conntrack_core.c
94527+++ b/net/netfilter/nf_conntrack_core.c
94528@@ -1614,6 +1614,10 @@ void nf_conntrack_init_end(void)
94529 #define DYING_NULLS_VAL ((1<<30)+1)
94530 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
94531
94532+#ifdef CONFIG_GRKERNSEC_HIDESYM
94533+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
94534+#endif
94535+
94536 int nf_conntrack_init_net(struct net *net)
94537 {
94538 int ret;
94539@@ -1628,7 +1632,11 @@ int nf_conntrack_init_net(struct net *net)
94540 goto err_stat;
94541 }
94542
94543+#ifdef CONFIG_GRKERNSEC_HIDESYM
94544+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
94545+#else
94546 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
94547+#endif
94548 if (!net->ct.slabname) {
94549 ret = -ENOMEM;
94550 goto err_slabname;
94551diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
94552index 1df1761..ce8b88a 100644
94553--- a/net/netfilter/nf_conntrack_ecache.c
94554+++ b/net/netfilter/nf_conntrack_ecache.c
94555@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
94556 #ifdef CONFIG_SYSCTL
94557 static int nf_conntrack_event_init_sysctl(struct net *net)
94558 {
94559- struct ctl_table *table;
94560+ ctl_table_no_const *table;
94561
94562 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
94563 GFP_KERNEL);
94564diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
94565index 974a2a4..52cc6ff 100644
94566--- a/net/netfilter/nf_conntrack_helper.c
94567+++ b/net/netfilter/nf_conntrack_helper.c
94568@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
94569
94570 static int nf_conntrack_helper_init_sysctl(struct net *net)
94571 {
94572- struct ctl_table *table;
94573+ ctl_table_no_const *table;
94574
94575 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
94576 GFP_KERNEL);
94577diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
94578index 0ab9636..cea3c6a 100644
94579--- a/net/netfilter/nf_conntrack_proto.c
94580+++ b/net/netfilter/nf_conntrack_proto.c
94581@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
94582
94583 static void
94584 nf_ct_unregister_sysctl(struct ctl_table_header **header,
94585- struct ctl_table **table,
94586+ ctl_table_no_const **table,
94587 unsigned int users)
94588 {
94589 if (users > 0)
94590diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
e2b79cd1 94591index a99b6c3..cb372f9 100644
bb5f0bf8
AF
94592--- a/net/netfilter/nf_conntrack_proto_dccp.c
94593+++ b/net/netfilter/nf_conntrack_proto_dccp.c
e2b79cd1
AF
94594@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
94595 const char *msg;
94596 u_int8_t state;
94597
94598- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94599+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94600 BUG_ON(dh == NULL);
94601
94602 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
bb5f0bf8
AF
94603@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
94604 out_invalid:
94605 if (LOG_INVALID(net, IPPROTO_DCCP))
94606 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
94607- NULL, msg);
94608+ NULL, "%s", msg);
94609 return false;
94610 }
94611
e2b79cd1
AF
94612@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
94613 u_int8_t type, old_state, new_state;
94614 enum ct_dccp_roles role;
94615
94616- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94617+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94618 BUG_ON(dh == NULL);
94619 type = dh->dccph_type;
94620
94621@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
94622 unsigned int cscov;
94623 const char *msg;
94624
94625- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94626+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94627 if (dh == NULL) {
94628 msg = "nf_ct_dccp: short packet ";
94629 goto out_invalid;
bb5f0bf8
AF
94630@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
94631
94632 out_invalid:
94633 if (LOG_INVALID(net, IPPROTO_DCCP))
94634- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
94635+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
94636 return -NF_ACCEPT;
94637 }
94638
94639diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
94640index 4d4d8f1..e0f9a32 100644
94641--- a/net/netfilter/nf_conntrack_proto_tcp.c
94642+++ b/net/netfilter/nf_conntrack_proto_tcp.c
94643@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
94644 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
94645 __u32 seq, ack, sack, end, win, swin;
94646 s16 receiver_offset;
94647- bool res;
94648+ bool res, in_recv_win;
94649
94650 /*
94651 * Get the required data from the packet.
94652@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
94653 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
94654 receiver->td_scale);
94655
94656+ /* Is the ending sequence in the receive window (if available)? */
94657+ in_recv_win = !receiver->td_maxwin ||
94658+ after(end, sender->td_end - receiver->td_maxwin - 1);
94659+
94660 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
94661 before(seq, sender->td_maxend + 1),
94662- after(end, sender->td_end - receiver->td_maxwin - 1),
94663+ (in_recv_win ? 1 : 0),
94664 before(sack, receiver->td_end + 1),
94665 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
94666
94667 if (before(seq, sender->td_maxend + 1) &&
94668- after(end, sender->td_end - receiver->td_maxwin - 1) &&
94669+ in_recv_win &&
94670 before(sack, receiver->td_end + 1) &&
94671 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
94672 /*
94673@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
94674 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
94675 "nf_ct_tcp: %s ",
94676 before(seq, sender->td_maxend + 1) ?
94677- after(end, sender->td_end - receiver->td_maxwin - 1) ?
94678+ in_recv_win ?
94679 before(sack, receiver->td_end + 1) ?
94680 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
94681 : "ACK is under the lower bound (possible overly delayed ACK)"
94682diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
94683index bd700b4..4a3dc61 100644
94684--- a/net/netfilter/nf_conntrack_standalone.c
94685+++ b/net/netfilter/nf_conntrack_standalone.c
94686@@ -471,7 +471,7 @@ static ctl_table nf_ct_netfilter_table[] = {
94687
94688 static int nf_conntrack_standalone_init_sysctl(struct net *net)
94689 {
94690- struct ctl_table *table;
94691+ ctl_table_no_const *table;
94692
94693 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
94694 GFP_KERNEL);
94695diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
94696index 902fb0a..87f7fdb 100644
94697--- a/net/netfilter/nf_conntrack_timestamp.c
94698+++ b/net/netfilter/nf_conntrack_timestamp.c
94699@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
94700 #ifdef CONFIG_SYSCTL
94701 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
94702 {
94703- struct ctl_table *table;
94704+ ctl_table_no_const *table;
94705
94706 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
94707 GFP_KERNEL);
94708diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
94709index 3b18dd1..f79e0ca 100644
94710--- a/net/netfilter/nf_log.c
94711+++ b/net/netfilter/nf_log.c
94712@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
94713
94714 #ifdef CONFIG_SYSCTL
94715 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
94716-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
94717+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
94718
94719 static int nf_log_proc_dostring(ctl_table *table, int write,
94720 void __user *buffer, size_t *lenp, loff_t *ppos)
94721@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
94722 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
94723 mutex_unlock(&nf_log_mutex);
94724 } else {
94725+ ctl_table_no_const nf_log_table = *table;
94726+
94727 mutex_lock(&nf_log_mutex);
94728 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
94729 lockdep_is_held(&nf_log_mutex));
94730 if (!logger)
94731- table->data = "NONE";
94732+ nf_log_table.data = "NONE";
94733 else
94734- table->data = logger->name;
94735- r = proc_dostring(table, write, buffer, lenp, ppos);
94736+ nf_log_table.data = logger->name;
94737+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
94738 mutex_unlock(&nf_log_mutex);
94739 }
94740
94741diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
94742index f042ae5..30ea486 100644
94743--- a/net/netfilter/nf_sockopt.c
94744+++ b/net/netfilter/nf_sockopt.c
94745@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
94746 }
94747 }
94748
94749- list_add(&reg->list, &nf_sockopts);
94750+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
94751 out:
94752 mutex_unlock(&nf_sockopt_mutex);
94753 return ret;
94754@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
94755 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
94756 {
94757 mutex_lock(&nf_sockopt_mutex);
94758- list_del(&reg->list);
94759+ pax_list_del((struct list_head *)&reg->list);
94760 mutex_unlock(&nf_sockopt_mutex);
94761 }
94762 EXPORT_SYMBOL(nf_unregister_sockopt);
94763diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
94764index 962e979..e46f350 100644
94765--- a/net/netfilter/nfnetlink_log.c
94766+++ b/net/netfilter/nfnetlink_log.c
94767@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
94768 struct nfnl_log_net {
94769 spinlock_t instances_lock;
94770 struct hlist_head instance_table[INSTANCE_BUCKETS];
94771- atomic_t global_seq;
94772+ atomic_unchecked_t global_seq;
94773 };
94774
94775 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
94776@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
94777 nfmsg->version = NFNETLINK_V0;
94778 nfmsg->res_id = htons(inst->group_num);
94779
94780+ memset(&pmsg, 0, sizeof(pmsg));
94781 pmsg.hw_protocol = skb->protocol;
94782 pmsg.hook = hooknum;
94783
94784@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
94785 if (indev && skb->dev &&
94786 skb->mac_header != skb->network_header) {
94787 struct nfulnl_msg_packet_hw phw;
94788- int len = dev_parse_header(skb, phw.hw_addr);
94789+ int len;
94790+
94791+ memset(&phw, 0, sizeof(phw));
94792+ len = dev_parse_header(skb, phw.hw_addr);
94793 if (len > 0) {
94794 phw.hw_addrlen = htons(len);
94795 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
94796@@ -559,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
94797 /* global sequence number */
94798 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
94799 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
94800- htonl(atomic_inc_return(&log->global_seq))))
94801+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
94802 goto nla_put_failure;
94803
94804 if (data_len) {
94805diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
94806index 5352b2d..e0083ce 100644
94807--- a/net/netfilter/nfnetlink_queue_core.c
94808+++ b/net/netfilter/nfnetlink_queue_core.c
94809@@ -444,7 +444,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
94810 if (indev && entskb->dev &&
94811 entskb->mac_header != entskb->network_header) {
94812 struct nfqnl_msg_packet_hw phw;
94813- int len = dev_parse_header(entskb, phw.hw_addr);
94814+ int len;
94815+
94816+ memset(&phw, 0, sizeof(phw));
94817+ len = dev_parse_header(entskb, phw.hw_addr);
94818 if (len) {
94819 phw.hw_addrlen = htons(len);
94820 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
94821diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
94822index 7011c71..6113cc7 100644
94823--- a/net/netfilter/xt_TCPMSS.c
94824+++ b/net/netfilter/xt_TCPMSS.c
94825@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94826 {
94827 const struct xt_tcpmss_info *info = par->targinfo;
94828 struct tcphdr *tcph;
94829- unsigned int tcplen, i;
94830+ int len, tcp_hdrlen;
94831+ unsigned int i;
94832 __be16 oldval;
94833 u16 newmss;
94834 u8 *opt;
94835@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94836 if (!skb_make_writable(skb, skb->len))
94837 return -1;
94838
94839- tcplen = skb->len - tcphoff;
94840+ len = skb->len - tcphoff;
94841+ if (len < (int)sizeof(struct tcphdr))
94842+ return -1;
94843+
94844 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
94845+ tcp_hdrlen = tcph->doff * 4;
94846
94847- /* Header cannot be larger than the packet */
94848- if (tcplen < tcph->doff*4)
94849+ if (len < tcp_hdrlen)
94850 return -1;
94851
94852 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
94853@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94854 newmss = info->mss;
94855
94856 opt = (u_int8_t *)tcph;
94857- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
94858- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
94859- opt[i+1] == TCPOLEN_MSS) {
94860+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
94861+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
94862 u_int16_t oldmss;
94863
94864 oldmss = (opt[i+2] << 8) | opt[i+3];
94865@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94866 }
94867
94868 /* There is data after the header so the option can't be added
94869- without moving it, and doing so may make the SYN packet
94870- itself too large. Accept the packet unmodified instead. */
94871- if (tcplen > tcph->doff*4)
94872+ * without moving it, and doing so may make the SYN packet
94873+ * itself too large. Accept the packet unmodified instead.
94874+ */
94875+ if (len > tcp_hdrlen)
94876 return 0;
94877
94878 /*
94879@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94880 newmss = min(newmss, (u16)1220);
94881
94882 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
94883- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
94884+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
94885
94886 inet_proto_csum_replace2(&tcph->check, skb,
94887- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
94888+ htons(len), htons(len + TCPOLEN_MSS), 1);
94889 opt[0] = TCPOPT_MSS;
94890 opt[1] = TCPOLEN_MSS;
94891 opt[2] = (newmss & 0xff00) >> 8;
94892diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
94893index b68fa19..625fa1d 100644
94894--- a/net/netfilter/xt_TCPOPTSTRIP.c
94895+++ b/net/netfilter/xt_TCPOPTSTRIP.c
94896@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
94897 struct tcphdr *tcph;
94898 u_int16_t n, o;
94899 u_int8_t *opt;
94900- int len;
94901+ int len, tcp_hdrlen;
94902
94903 /* This is a fragment, no TCP header is available */
94904 if (par->fragoff != 0)
94905@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
94906 return NF_DROP;
94907
94908 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
94909- if (tcph->doff * 4 > len)
94910+ tcp_hdrlen = tcph->doff * 4;
94911+
94912+ if (len < tcp_hdrlen)
94913 return NF_DROP;
94914
94915 opt = (u_int8_t *)tcph;
94916@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
94917 * Walk through all TCP options - if we find some option to remove,
94918 * set all octets to %TCPOPT_NOP and adjust checksum.
94919 */
94920- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
94921+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
94922 optl = optlen(opt, i);
94923
94924- if (i + optl > tcp_hdrlen(skb))
94925+ if (i + optl > tcp_hdrlen)
94926 break;
94927
94928 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
94929diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
94930new file mode 100644
94931index 0000000..c566332
94932--- /dev/null
94933+++ b/net/netfilter/xt_gradm.c
94934@@ -0,0 +1,51 @@
94935+/*
94936+ * gradm match for netfilter
94937